mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2024-12-25 02:49:32 +03:00)
cstyle: Resolve C style issues
The vast majority of these changes are in Linux-specific code. They are the result of not having an automated style checker to validate the code when it was originally written. Others were caused when the common code was slightly adjusted for Linux. This patch contains no functional changes; it only refreshes the code to conform to the style guide.

Everyone submitting patches for inclusion upstream should now run 'make checkstyle' and resolve any warnings prior to opening a pull request. The automated builders have been updated to fail a build when 'make checkstyle' detects an issue.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #1821
parent 8ffef572ed
commit d1d7e2689d
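For context on what 'make checkstyle' enforces, the following is a minimal illustrative sketch in C (the function, names, and message below are hypothetical and not taken from this diff) of the main conventions the patch applies throughout: parenthesized return values, a space between 'sizeof' and its operand, and explicit '== 0' comparisons instead of '!strcmp()':

/*
 * Illustrative sketch only -- hypothetical function, not part of the patch.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int
example_style(const char *name)
{
	char buf[128];

	/* Write comparisons against 0 rather than with '!'. */
	if (strcmp(name, "legacy") == 0)
		return (EINVAL);	/* parenthesize return values */

	/* Leave a space between 'sizeof' and its parenthesized operand. */
	(void) snprintf(buf, sizeof (buf), "dataset: %s", name);

	return (0);
}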
@@ -272,7 +272,7 @@ out:
 len = strlen(cwd);

 /* Do not add one when cwd already ends in a trailing '/' */
-if (!strncmp(cwd, dataset, len))
+if (strncmp(cwd, dataset, len) == 0)
 return (dataset + len + (cwd[len-1] != '/'));

 return (dataset);
@@ -501,7 +501,7 @@ main(int argc, char **argv)
 * using zfs as your root file system both rc.sysinit/umountroot and
 * systemd depend on 'mount -o remount <mountpoint>' to work.
 */
-if (zfsutil && !strcmp(legacy, ZFS_MOUNTPOINT_LEGACY)) {
+if (zfsutil && (strcmp(legacy, ZFS_MOUNTPOINT_LEGACY) == 0)) {
 (void) fprintf(stderr, gettext(
 "filesystem '%s' cannot be mounted using 'zfs mount'.\n"
 "Use 'zfs set mountpoint=%s' or 'mount -t zfs %s %s'.\n"
@@ -165,7 +165,8 @@ usage(void)
 (void) fprintf(stderr, " -t <txg> -- highest txg to use when "
 "searching for uberblocks\n");
 (void) fprintf(stderr, " -M <number of inflight I/Os> -- "
-"specify the maximum number of checksumming I/Os [default is 200]\n");
+"specify the maximum number of checksumming I/Os "
+"[default is 200]\n");
 (void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
 "to make only that option verbose\n");
 (void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
@@ -1319,7 +1320,8 @@ dump_deadlist(dsl_deadlist_t *dl)
 dle = AVL_NEXT(&dl->dl_tree, dle)) {
 if (dump_opt['d'] >= 5) {
 char buf[128];
-(void) snprintf(buf, sizeof (buf), "mintxg %llu -> obj %llu",
+(void) snprintf(buf, sizeof (buf),
+"mintxg %llu -> obj %llu",
 (longlong_t)dle->dle_mintxg,
 (longlong_t)dle->dle_bpobj.bpo_object);

@@ -1436,7 +1438,7 @@ dump_znode_sa_xattr(sa_handle_t *hdl)

 (void) printf("\t\t%s = ", nvpair_name(elem));
 nvpair_value_byte_array(elem, &value, &cnt);
-for (idx = 0 ; idx < cnt ; ++idx) {
+for (idx = 0; idx < cnt; ++idx) {
 if (isprint(value[idx]))
 (void) putchar(value[idx]);
 else
@@ -2394,7 +2396,7 @@ dump_block_stats(spa_t *spa)
 * it's not part of any space map) is a double allocation,
 * reference to a freed block, or an unclaimed log block.
 */
-bzero(&zcb, sizeof(zdb_cb_t));
+bzero(&zcb, sizeof (zdb_cb_t));
 zdb_leak_init(spa, &zcb);

 /*
@@ -313,8 +313,8 @@ zfs_sort(const void *larg, const void *rarg, void *data)
 } else if (psc->sc_prop == ZFS_PROP_NAME) {
 lvalid = rvalid = B_TRUE;

-(void) strlcpy(lbuf, zfs_get_name(l), sizeof(lbuf));
-(void) strlcpy(rbuf, zfs_get_name(r), sizeof(rbuf));
+(void) strlcpy(lbuf, zfs_get_name(l), sizeof (lbuf));
+(void) strlcpy(rbuf, zfs_get_name(r), sizeof (rbuf));

 lstr = lbuf;
 rstr = rbuf;
@@ -2879,7 +2879,7 @@ print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)

 if (pl->pl_prop == ZFS_PROP_NAME) {
 (void) strlcpy(property, zfs_get_name(zhp),
-sizeof(property));
+sizeof (property));
 propstr = property;
 right_justify = zfs_prop_align_right(pl->pl_prop);
 } else if (pl->pl_prop != ZPROP_INVAL) {
@@ -1,4 +1,4 @@
-/*****************************************************************************\
+/*
 * ZPIOS is a heavily modified version of the original PIOS test code.
 * It is designed to have the test code running in the Linux kernel
 * against ZFS while still being flexibly controled from user space.
@@ -29,7 +29,7 @@
 *
 * You should have received a copy of the GNU General Public License along
 * with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
-\*****************************************************************************/
+*/

 #ifndef _ZPIOS_H
 #define _ZPIOS_H
@@ -60,7 +60,8 @@

 #define KMGT_SIZE 16

-/* All offsets, sizes and counts can be passed to the application in
+/*
+* All offsets, sizes and counts can be passed to the application in
 * multiple ways.
 * 1. a value (stored in val[0], val_count will be 1)
 * 2. a comma separated list of values (stored in val[], using val_count)
@@ -72,7 +73,7 @@ typedef struct pios_range_repeat {
 uint64_t val_low;
 uint64_t val_high;
 uint64_t val_inc_perc;
-uint64_t next_val; /* Used for multiple runs in get_next() */
+uint64_t next_val; /* For multiple runs in get_next() */
 } range_repeat_t;

 typedef struct cmd_args {
@ -1,7 +1,7 @@
|
||||
/*****************************************************************************\
|
||||
/*
|
||||
* ZPIOS is a heavily modified version of the original PIOS test code.
|
||||
* It is designed to have the test code running in the Linux kernel
|
||||
* against ZFS while still being flexibly controled from user space.
|
||||
* against ZFS while still being flexibly controlled from user space.
|
||||
*
|
||||
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
|
||||
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
|
||||
@ -29,7 +29,7 @@
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
|
||||
\*****************************************************************************/
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
@ -42,7 +42,8 @@
|
||||
#include <sys/ioctl.h>
|
||||
#include "zpios.h"
|
||||
|
||||
static const char short_opt[] = "t:l:h:e:n:i:j:k:o:m:q:r:c:a:b:g:s:A:B:C:"
|
||||
static const char short_opt[] =
|
||||
"t:l:h:e:n:i:j:k:o:m:q:r:c:a:b:g:s:A:B:C:"
|
||||
"L:p:M:xP:R:G:I:N:T:VzOfHv?";
|
||||
static const struct option long_opt[] = {
|
||||
{"threadcount", required_argument, 0, 't' },
|
||||
@ -133,7 +134,7 @@ usage(void)
|
||||
" --verbose -v =increase verbosity\n"
|
||||
" --help -? =this help\n\n");
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
static void args_fini(cmd_args_t *args)
|
||||
@ -155,17 +156,17 @@ args_init(int argc, char **argv)
|
||||
|
||||
if (argc == 1) {
|
||||
usage();
|
||||
return (cmd_args_t *)NULL;
|
||||
return ((cmd_args_t *)NULL);
|
||||
}
|
||||
|
||||
/* Configure and populate the args structures */
|
||||
args = malloc(sizeof(*args));
|
||||
args = malloc(sizeof (*args));
|
||||
if (args == NULL)
|
||||
return NULL;
|
||||
return (NULL);
|
||||
|
||||
memset(args, 0, sizeof(*args));
|
||||
memset(args, 0, sizeof (*args));
|
||||
|
||||
while ((c=getopt_long(argc, argv, short_opt, long_opt, NULL)) != -1) {
|
||||
while ((c = getopt_long(argc, argv, short_opt, long_opt, NULL)) != -1) {
|
||||
rc = 0;
|
||||
|
||||
switch (c) {
|
||||
@ -271,13 +272,15 @@ args_init(int argc, char **argv)
|
||||
strncpy(args->log, optarg, ZPIOS_PATH_SIZE - 1);
|
||||
break;
|
||||
case 'I': /* --regionnoise */
|
||||
rc = set_noise(&args->regionnoise, optarg, "regionnoise");
|
||||
rc = set_noise(&args->regionnoise, optarg,
|
||||
"regionnoise");
|
||||
break;
|
||||
case 'N': /* --chunknoise */
|
||||
rc = set_noise(&args->chunknoise, optarg, "chunknoise");
|
||||
break;
|
||||
case 'T': /* --threaddelay */
|
||||
rc = set_noise(&args->thread_delay, optarg, "threaddelay");
|
||||
rc = set_noise(&args->thread_delay, optarg,
|
||||
"threaddelay");
|
||||
break;
|
||||
case 'V': /* --verify */
|
||||
args->flags |= DMU_VERIFY;
|
||||
@ -301,7 +304,8 @@ args_init(int argc, char **argv)
|
||||
rc = 1;
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr,"Unknown option '%s'\n",argv[optind-1]);
|
||||
fprintf(stderr, "Unknown option '%s'\n",
|
||||
argv[optind - 1]);
|
||||
rc = EINVAL;
|
||||
break;
|
||||
}
|
||||
@ -309,7 +313,7 @@ args_init(int argc, char **argv)
|
||||
if (rc) {
|
||||
usage();
|
||||
args_fini(args);
|
||||
return NULL;
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
@ -323,7 +327,7 @@ args_init(int argc, char **argv)
|
||||
fprintf(stderr, "Error: Pool not specificed\n");
|
||||
usage();
|
||||
args_fini(args);
|
||||
return NULL;
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
if ((args->flags & (DMU_WRITE_ZC | DMU_READ_ZC)) &&
|
||||
@ -332,10 +336,10 @@ args_init(int argc, char **argv)
|
||||
"used for performance analysis only\n");
|
||||
usage();
|
||||
args_fini(args);
|
||||
return NULL;
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
return args;
|
||||
return (args);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -344,7 +348,7 @@ dev_clear(void)
|
||||
zpios_cfg_t cfg;
|
||||
int rc;
|
||||
|
||||
memset(&cfg, 0, sizeof(cfg));
|
||||
memset(&cfg, 0, sizeof (cfg));
|
||||
cfg.cfg_magic = ZPIOS_CFG_MAGIC;
|
||||
cfg.cfg_cmd = ZPIOS_CFG_BUFFER_CLEAR;
|
||||
cfg.cfg_arg1 = 0;
|
||||
@ -356,7 +360,7 @@ dev_clear(void)
|
||||
|
||||
lseek(zpiosctl_fd, 0, SEEK_SET);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
/* Passing a size of zero simply results in querying the current size */
|
||||
@ -366,7 +370,7 @@ dev_size(int size)
|
||||
zpios_cfg_t cfg;
|
||||
int rc;
|
||||
|
||||
memset(&cfg, 0, sizeof(cfg));
|
||||
memset(&cfg, 0, sizeof (cfg));
|
||||
cfg.cfg_magic = ZPIOS_CFG_MAGIC;
|
||||
cfg.cfg_cmd = ZPIOS_CFG_BUFFER_SIZE;
|
||||
cfg.cfg_arg1 = size;
|
||||
@ -375,10 +379,10 @@ dev_size(int size)
|
||||
if (rc) {
|
||||
fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
|
||||
(unsigned long) ZPIOS_CFG, cfg.cfg_cmd, errno);
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
return cfg.cfg_rc1;
|
||||
return (cfg.cfg_rc1);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -422,7 +426,7 @@ dev_init(void)
|
||||
}
|
||||
|
||||
memset(zpios_buffer, 0, zpios_buffer_size);
|
||||
return 0;
|
||||
return (0);
|
||||
error:
|
||||
if (zpiosctl_fd != -1) {
|
||||
if (close(zpiosctl_fd) == -1) {
|
||||
@ -431,7 +435,7 @@ error:
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -443,35 +447,35 @@ get_next(uint64_t *val, range_repeat_t *range)
|
||||
(range->val_low * range->next_val / 100);
|
||||
|
||||
if (*val > range->val_high)
|
||||
return 0; /* No more values, limit exceeded */
|
||||
return (0); /* No more values, limit exceeded */
|
||||
|
||||
if (!range->next_val)
|
||||
range->next_val = range->val_inc_perc;
|
||||
else
|
||||
range->next_val = range->next_val+range->val_inc_perc;
|
||||
range->next_val = range->next_val + range->val_inc_perc;
|
||||
|
||||
return 1; /* more values to come */
|
||||
return (1); /* more values to come */
|
||||
|
||||
/* if only one val is given */
|
||||
} else if (range->val_count == 1) {
|
||||
if (range->next_val)
|
||||
return 0; /* No more values, we only have one */
|
||||
return (0); /* No more values, we only have one */
|
||||
|
||||
*val = range->val[0];
|
||||
range->next_val = 1;
|
||||
return 1; /* more values to come */
|
||||
return (1); /* more values to come */
|
||||
|
||||
/* if comma separated values are given */
|
||||
} else if (range->val_count > 1) {
|
||||
if (range->next_val > range->val_count - 1)
|
||||
return 0; /* No more values, limit exceeded */
|
||||
return (0); /* No more values, limit exceeded */
|
||||
|
||||
*val = range->val[range->next_val];
|
||||
range->next_val++;
|
||||
return 1; /* more values to come */
|
||||
return (1); /* more values to come */
|
||||
}
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -483,10 +487,12 @@ run_one(cmd_args_t *args, uint32_t id, uint32_t T, uint32_t N,
|
||||
|
||||
dev_clear();
|
||||
|
||||
cmd_size = sizeof(zpios_cmd_t) + ((T + N + 1) * sizeof(zpios_stats_t));
|
||||
cmd_size =
|
||||
sizeof (zpios_cmd_t)
|
||||
+ ((T + N + 1) * sizeof (zpios_stats_t));
|
||||
cmd = (zpios_cmd_t *)malloc(cmd_size);
|
||||
if (cmd == NULL)
|
||||
return ENOMEM;
|
||||
return (ENOMEM);
|
||||
|
||||
memset(cmd, 0, cmd_size);
|
||||
cmd->cmd_magic = ZPIOS_CMD_MAGIC;
|
||||
@ -504,7 +510,7 @@ run_one(cmd_args_t *args, uint32_t id, uint32_t T, uint32_t N,
|
||||
cmd->cmd_chunk_noise = args->chunknoise;
|
||||
cmd->cmd_thread_delay = args->thread_delay;
|
||||
cmd->cmd_flags = args->flags;
|
||||
cmd->cmd_data_size = (T + N + 1) * sizeof(zpios_stats_t);
|
||||
cmd->cmd_data_size = (T + N + 1) * sizeof (zpios_stats_t);
|
||||
|
||||
rc = ioctl(zpiosctl_fd, ZPIOS_CMD, cmd);
|
||||
if (rc)
|
||||
@ -524,7 +530,7 @@ run_one(cmd_args_t *args, uint32_t id, uint32_t T, uint32_t N,
|
||||
|
||||
free(cmd);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -540,7 +546,7 @@ run_offsets(cmd_args_t *args)
|
||||
}
|
||||
|
||||
args->O.next_val = 0;
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -552,7 +558,7 @@ run_region_counts(cmd_args_t *args)
|
||||
rc = run_offsets(args);
|
||||
|
||||
args->N.next_val = 0;
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -564,14 +570,14 @@ run_region_sizes(cmd_args_t *args)
|
||||
if (args->current_S < args->current_C) {
|
||||
fprintf(stderr, "Error: in any run chunksize can "
|
||||
"not be smaller than regionsize.\n");
|
||||
return EINVAL;
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
rc = run_region_counts(args);
|
||||
}
|
||||
|
||||
args->S.next_val = 0;
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -584,7 +590,7 @@ run_chunk_sizes(cmd_args_t *args)
|
||||
}
|
||||
|
||||
args->C.next_val = 0;
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -595,7 +601,7 @@ run_thread_counts(cmd_args_t *args)
|
||||
while (rc == 0 && get_next((uint64_t *)&args->current_T, &args->T))
|
||||
rc = run_chunk_sizes(args);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
int
|
||||
@ -625,5 +631,5 @@ out:
|
||||
args_fini(args);
|
||||
|
||||
dev_fini();
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
/*****************************************************************************\
|
||||
/*
|
||||
* ZPIOS is a heavily modified version of the original PIOS test code.
|
||||
* It is designed to have the test code running in the Linux kernel
|
||||
* against ZFS while still being flexibly controled from user space.
|
||||
@ -29,7 +29,7 @@
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
|
||||
\*****************************************************************************/
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
@ -49,7 +49,7 @@ kmgt_to_uint64(const char *str, uint64_t *val)
|
||||
|
||||
*val = strtoll(str, &endptr, 0);
|
||||
if ((str == endptr) && (*val == 0))
|
||||
return EINVAL;
|
||||
return (EINVAL);
|
||||
|
||||
switch (endptr[0]) {
|
||||
case 'k': case 'K':
|
||||
@ -70,7 +70,7 @@ kmgt_to_uint64(const char *str, uint64_t *val)
|
||||
rc = EINVAL;
|
||||
}
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static char *
|
||||
@ -85,12 +85,12 @@ uint64_to_kmgt(char *str, uint64_t val)
|
||||
}
|
||||
|
||||
if (i >= 4)
|
||||
(void)snprintf(str, KMGT_SIZE-1, "inf");
|
||||
(void) snprintf(str, KMGT_SIZE-1, "inf");
|
||||
else
|
||||
(void)snprintf(str, KMGT_SIZE-1, "%lu%c", (unsigned long)val,
|
||||
(void) snprintf(str, KMGT_SIZE-1, "%lu%c", (unsigned long)val,
|
||||
(i == -1) ? '\0' : postfix[i]);
|
||||
|
||||
return str;
|
||||
return (str);
|
||||
}
|
||||
|
||||
static char *
|
||||
@ -106,12 +106,12 @@ kmgt_per_sec(char *str, uint64_t v, double t)
|
||||
}
|
||||
|
||||
if (i >= 4)
|
||||
(void)snprintf(str, KMGT_SIZE-1, "inf");
|
||||
(void) snprintf(str, KMGT_SIZE-1, "inf");
|
||||
else
|
||||
(void)snprintf(str, KMGT_SIZE-1, "%.2f%c", val,
|
||||
(void) snprintf(str, KMGT_SIZE-1, "%.2f%c", val,
|
||||
(i == -1) ? '\0' : postfix[i]);
|
||||
|
||||
return str;
|
||||
return (str);
|
||||
}
|
||||
|
||||
static char *
|
||||
@ -126,7 +126,7 @@ print_flags(char *str, uint32_t flags)
|
||||
str[6] = (flags & DMU_WRITE_NOWAIT) ? 'O' : '-';
|
||||
str[7] = '\0';
|
||||
|
||||
return str;
|
||||
return (str);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -138,13 +138,13 @@ regex_match(const char *string, char *pattern)
|
||||
rc = regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB | REG_ICASE);
|
||||
if (rc) {
|
||||
fprintf(stderr, "Error: Couldn't do regcomp, %d\n", rc);
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
rc = regexec(&re, string, (size_t) 0, NULL, 0);
|
||||
regfree(&re);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
/* fills the pios_range_repeat structure of comma separated values */
|
||||
@ -156,14 +156,15 @@ split_string(const char *optarg, char *pattern, range_repeat_t *range)
|
||||
int rc, i = 0;
|
||||
|
||||
if ((rc = regex_match(optarg, pattern)))
|
||||
return rc;
|
||||
return (rc);
|
||||
|
||||
cp = strdup(optarg);
|
||||
if (cp == NULL)
|
||||
return ENOMEM;
|
||||
return (ENOMEM);
|
||||
|
||||
do {
|
||||
/* STRTOK(3) Each subsequent call, with a null pointer as the
|
||||
/*
|
||||
* STRTOK(3) Each subsequent call, with a null pointer as the
|
||||
* value of the * first argument, starts searching from the
|
||||
* saved pointer and behaves as described above.
|
||||
*/
|
||||
@ -177,7 +178,7 @@ split_string(const char *optarg, char *pattern, range_repeat_t *range)
|
||||
kmgt_to_uint64(token[i], &range->val[i]);
|
||||
|
||||
free(cp);
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
int
|
||||
@ -195,14 +196,16 @@ set_count(char *pattern1, char *pattern2, range_repeat_t *range,
|
||||
} else if (split_string(optarg, pattern2, range) < 0) {
|
||||
fprintf(stderr, "Error: Incorrect pattern for %s, '%s'\n",
|
||||
arg, optarg);
|
||||
return EINVAL;
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
/* validates the value with regular expression and sets low, high, incr
|
||||
* according to value at which flag will be set. Sets the flag after. */
|
||||
/*
|
||||
* Validates the value with regular expression and sets low, high, incr
|
||||
* according to value at which flag will be set. Sets the flag after.
|
||||
*/
|
||||
int
|
||||
set_lhi(char *pattern, range_repeat_t *range, char *optarg,
|
||||
int flag, uint32_t *flag_thread, char *arg)
|
||||
@ -212,7 +215,7 @@ set_lhi(char *pattern, range_repeat_t *range, char *optarg,
|
||||
if ((rc = regex_match(optarg, pattern))) {
|
||||
fprintf(stderr, "Error: Wrong pattern in %s, '%s'\n",
|
||||
arg, optarg);
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
switch (flag) {
|
||||
@ -231,7 +234,7 @@ set_lhi(char *pattern, range_repeat_t *range, char *optarg,
|
||||
|
||||
*flag_thread |= flag;
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
int
|
||||
@ -241,10 +244,10 @@ set_noise(uint64_t *noise, char *optarg, char *arg)
|
||||
kmgt_to_uint64(optarg, noise);
|
||||
} else {
|
||||
fprintf(stderr, "Error: Incorrect pattern for %s\n", arg);
|
||||
return EINVAL;
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
int
|
||||
@ -255,7 +258,7 @@ set_load_params(cmd_args_t *args, char *optarg)
|
||||
|
||||
search = strdup(optarg);
|
||||
if (search == NULL)
|
||||
return ENOMEM;
|
||||
return (ENOMEM);
|
||||
|
||||
while ((param = strtok(search, comma)) != NULL) {
|
||||
search = NULL;
|
||||
@ -275,51 +278,57 @@ set_load_params(cmd_args_t *args, char *optarg)
|
||||
|
||||
free(search);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
|
||||
/* checks the low, high, increment values against the single value for
|
||||
/*
|
||||
* Checks the low, high, increment values against the single value for
|
||||
* mutual exclusion, for e.g threadcount is mutually exclusive to
|
||||
* threadcount_low, ..._high, ..._incr */
|
||||
* threadcount_low, ..._high, ..._incr
|
||||
*/
|
||||
int
|
||||
check_mutual_exclusive_command_lines(uint32_t flag, char *arg)
|
||||
{
|
||||
if ((flag & FLAG_SET) && (flag & (FLAG_LOW | FLAG_HIGH | FLAG_INCR))) {
|
||||
fprintf(stderr, "Error: --%s can not be given with --%s_low, "
|
||||
"--%s_high or --%s_incr.\n", arg, arg, arg, arg);
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
if ((flag & (FLAG_LOW | FLAG_HIGH | FLAG_INCR)) && !(flag & FLAG_SET)){
|
||||
if ((flag & (FLAG_LOW | FLAG_HIGH | FLAG_INCR)) && !(flag & FLAG_SET)) {
|
||||
if (flag != (FLAG_LOW | FLAG_HIGH | FLAG_INCR)) {
|
||||
fprintf(stderr, "Error: One or more values missing "
|
||||
"from --%s_low, --%s_high, --%s_incr.\n",
|
||||
arg, arg, arg);
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
return (1);
|
||||
}
|
||||
|
||||
void
|
||||
print_stats_header(cmd_args_t *args)
|
||||
{
|
||||
if (args->verbose) {
|
||||
printf("status name id\tth-cnt\trg-cnt\trg-sz\t"
|
||||
printf(
|
||||
"status name id\tth-cnt\trg-cnt\trg-sz\t"
|
||||
"ch-sz\toffset\trg-no\tch-no\tth-dly\tflags\ttime\t"
|
||||
"cr-time\trm-time\twr-time\trd-time\twr-data\twr-ch\t"
|
||||
"wr-bw\trd-data\trd-ch\trd-bw\n");
|
||||
printf("------------------------------------------------"
|
||||
printf(
|
||||
"------------------------------------------------"
|
||||
"------------------------------------------------"
|
||||
"------------------------------------------------"
|
||||
"----------------------------------------------\n");
|
||||
} else {
|
||||
printf("status name id\t"
|
||||
printf(
|
||||
"status name id\t"
|
||||
"wr-data\twr-ch\twr-bw\t"
|
||||
"rd-data\trd-ch\trd-bw\n");
|
||||
printf("-----------------------------------------"
|
||||
printf(
|
||||
"-----------------------------------------"
|
||||
"--------------------------------------\n");
|
||||
}
|
||||
}
|
||||
|
@ -2568,7 +2568,7 @@ get_columns(void)
|
||||
columns = 999;
|
||||
}
|
||||
|
||||
return columns;
|
||||
return (columns);
|
||||
}
|
||||
|
||||
int
|
||||
@ -5037,19 +5037,21 @@ get_history_one(zpool_handle_t *zhp, void *data)
|
||||
}
|
||||
(void) printf("%s [internal %s txg:%lld] %s", tbuf,
|
||||
zfs_history_event_names[ievent],
|
||||
(long long int)fnvlist_lookup_uint64(rec, ZPOOL_HIST_TXG),
|
||||
(longlong_t) fnvlist_lookup_uint64(
|
||||
rec, ZPOOL_HIST_TXG),
|
||||
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
|
||||
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
|
||||
if (!cb->internal)
|
||||
continue;
|
||||
(void) printf("%s [txg:%lld] %s", tbuf,
|
||||
(long long int)fnvlist_lookup_uint64(rec, ZPOOL_HIST_TXG),
|
||||
(longlong_t) fnvlist_lookup_uint64(
|
||||
rec, ZPOOL_HIST_TXG),
|
||||
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
|
||||
if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
|
||||
(void) printf(" %s (%llu)",
|
||||
fnvlist_lookup_string(rec,
|
||||
ZPOOL_HIST_DSNAME),
|
||||
(long long unsigned int)fnvlist_lookup_uint64(rec,
|
||||
(u_longlong_t)fnvlist_lookup_uint64(rec,
|
||||
ZPOOL_HIST_DSID));
|
||||
}
|
||||
(void) printf(" %s", fnvlist_lookup_string(rec,
|
||||
@ -5168,7 +5170,7 @@ zpool_do_events_short(nvlist_t *nvl)
|
||||
(void) strncpy(str, ctime_str+4, 6); /* 'Jun 30' */
|
||||
(void) strncpy(str+7, ctime_str+20, 4); /* '1993' */
|
||||
(void) strncpy(str+12, ctime_str+11, 8); /* '21:49:08' */
|
||||
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]);/* '.123456789' */
|
||||
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
|
||||
(void) printf(gettext("%s "), str);
|
||||
|
||||
verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
|
||||
@ -5357,7 +5359,8 @@ zpool_do_events_nvprint(nvlist_t *nvl, int depth)
|
||||
|
||||
(void) nvpair_value_int64_array(nvp, &val, &nelem);
|
||||
for (i = 0; i < nelem; i++)
|
||||
printf(gettext("0x%llx "), (u_longlong_t)val[i]);
|
||||
printf(gettext("0x%llx "),
|
||||
(u_longlong_t)val[i]);
|
||||
|
||||
break;
|
||||
}
|
||||
@ -5368,7 +5371,8 @@ zpool_do_events_nvprint(nvlist_t *nvl, int depth)
|
||||
|
||||
(void) nvpair_value_uint64_array(nvp, &val, &nelem);
|
||||
for (i = 0; i < nelem; i++)
|
||||
printf(gettext("0x%llx "), (u_longlong_t)val[i]);
|
||||
printf(gettext("0x%llx "),
|
||||
(u_longlong_t)val[i]);
|
||||
|
||||
break;
|
||||
}
|
||||
@ -5476,7 +5480,7 @@ zpool_do_events(int argc, char **argv)
|
||||
else
|
||||
ret = zpool_do_events_next(&opts);
|
||||
|
||||
return ret;
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -5690,8 +5694,7 @@ main(int argc, char **argv)
|
||||
/*
|
||||
* Special case '-?'
|
||||
*/
|
||||
if ((strcmp(cmdname, "-?") == 0) ||
|
||||
strcmp(cmdname, "--help") == 0)
|
||||
if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
|
||||
usage(B_TRUE);
|
||||
|
||||
if ((g_zfs = libzfs_init()) == NULL)
|
||||
|
@ -44,7 +44,8 @@ uint_t num_logs(nvlist_t *nv);
|
||||
*/
|
||||
|
||||
nvlist_t *make_root_vdev(zpool_handle_t *zhp, nvlist_t *props, int force,
|
||||
int check_rep, boolean_t replacing, boolean_t dryrun, int argc, char **argv);
|
||||
int check_rep, boolean_t replacing, boolean_t dryrun, int argc,
|
||||
char **argv);
|
||||
nvlist_t *split_mirror_vdev(zpool_handle_t *zhp, char *newname,
|
||||
nvlist_t *props, splitflags_t flags, int argc, char **argv);
|
||||
|
||||
|
@ -187,7 +187,7 @@ static vdev_disk_db_entry_t vdev_disk_database[] = {
|
||||
{"ATA SAMSUNG MCCOE32G", 4096},
|
||||
{"ATA SAMSUNG MCCOE64G", 4096},
|
||||
{"ATA SAMSUNG SSD PM80", 4096},
|
||||
/* Imported from Open Solaris*/
|
||||
/* Imported from Open Solaris */
|
||||
{"ATA MARVELL SD88SA02", 4096},
|
||||
/* Advanced format Hard drives */
|
||||
{"ATA Hitachi HDS5C303", 4096},
|
||||
@ -231,10 +231,10 @@ check_sector_size_database(char *path, int *sector_size)
|
||||
int i;
|
||||
|
||||
/* Prepare INQUIRY command */
|
||||
memset(&io_hdr, 0, sizeof(sg_io_hdr_t));
|
||||
memset(&io_hdr, 0, sizeof (sg_io_hdr_t));
|
||||
io_hdr.interface_id = 'S';
|
||||
io_hdr.cmd_len = sizeof(inq_cmd_blk);
|
||||
io_hdr.mx_sb_len = sizeof(sense_buffer);
|
||||
io_hdr.cmd_len = sizeof (inq_cmd_blk);
|
||||
io_hdr.mx_sb_len = sizeof (sense_buffer);
|
||||
io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
|
||||
io_hdr.dxfer_len = INQ_REPLY_LEN;
|
||||
io_hdr.dxferp = inq_buff;
|
||||
@ -412,7 +412,7 @@ check_disk(const char *path, blkid_cache cache, int force,
|
||||
|
||||
/* This is not a wholedisk we only check the given partition */
|
||||
if (!iswholedisk)
|
||||
return check_slice(path, cache, force, isspare);
|
||||
return (check_slice(path, cache, force, isspare));
|
||||
|
||||
/*
|
||||
* When the device is a whole disk try to read the efi partition
|
||||
@ -424,19 +424,19 @@ check_disk(const char *path, blkid_cache cache, int force,
|
||||
*/
|
||||
if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0) {
|
||||
check_error(errno);
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
|
||||
if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
|
||||
(void) close(fd);
|
||||
|
||||
if (force) {
|
||||
return 0;
|
||||
return (0);
|
||||
} else {
|
||||
vdev_error(gettext("%s does not contain an EFI "
|
||||
"label but it may contain partition\n"
|
||||
"information in the MBR.\n"), path);
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -451,11 +451,11 @@ check_disk(const char *path, blkid_cache cache, int force,
|
||||
|
||||
if (force) {
|
||||
/* Partitions will no be created using the backup */
|
||||
return 0;
|
||||
return (0);
|
||||
} else {
|
||||
vdev_error(gettext("%s contains a corrupt primary "
|
||||
"EFI label.\n"), path);
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -500,18 +500,18 @@ check_device(const char *path, boolean_t force,
|
||||
|
||||
if ((err = blkid_get_cache(&cache, NULL)) != 0) {
|
||||
check_error(err);
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
|
||||
if ((err = blkid_probe_all(cache)) != 0) {
|
||||
blkid_put_cache(cache);
|
||||
check_error(err);
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
}
|
||||
#endif /* HAVE_LIBBLKID */
|
||||
|
||||
return check_disk(path, cache, force, isspare, iswholedisk);
|
||||
return (check_disk(path, cache, force, isspare, iswholedisk));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -558,8 +558,8 @@ is_shorthand_path(const char *arg, char *path,
|
||||
return (0);
|
||||
}
|
||||
|
||||
strlcpy(path, arg, sizeof(path));
|
||||
memset(statbuf, 0, sizeof(*statbuf));
|
||||
strlcpy(path, arg, sizeof (path));
|
||||
memset(statbuf, 0, sizeof (*statbuf));
|
||||
*wholedisk = B_FALSE;
|
||||
|
||||
return (error);
|
||||
@ -1136,7 +1136,7 @@ zero_label(char *path)
|
||||
return (-1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1225,7 +1225,7 @@ make_disks(zpool_handle_t *zhp, nvlist_t *nv)
|
||||
* and then block until udev creates the new link.
|
||||
*/
|
||||
if (!is_exclusive || !is_spare(NULL, udevpath)) {
|
||||
ret = strncmp(udevpath,UDISK_ROOT,strlen(UDISK_ROOT));
|
||||
ret = strncmp(udevpath, UDISK_ROOT, strlen(UDISK_ROOT));
|
||||
if (ret == 0) {
|
||||
ret = lstat64(udevpath, &statbuf);
|
||||
if (ret == 0 && S_ISLNK(statbuf.st_mode))
|
||||
@ -1502,8 +1502,8 @@ construct_spec(nvlist_t *props, int argc, char **argv)
|
||||
children * sizeof (nvlist_t *));
|
||||
if (child == NULL)
|
||||
zpool_no_memory();
|
||||
if ((nv = make_leaf_vdev(props, argv[c], B_FALSE))
|
||||
== NULL)
|
||||
if ((nv = make_leaf_vdev(props, argv[c],
|
||||
B_FALSE)) == NULL)
|
||||
return (NULL);
|
||||
child[children - 1] = nv;
|
||||
}
|
||||
@ -1558,7 +1558,8 @@ construct_spec(nvlist_t *props, int argc, char **argv)
|
||||
* We have a device. Pass off to make_leaf_vdev() to
|
||||
* construct the appropriate nvlist describing the vdev.
|
||||
*/
|
||||
if ((nv = make_leaf_vdev(props, argv[0], is_log)) == NULL)
|
||||
if ((nv = make_leaf_vdev(props, argv[0],
|
||||
is_log)) == NULL)
|
||||
return (NULL);
|
||||
if (is_log)
|
||||
nlogs++;
|
||||
|
@ -3587,7 +3587,7 @@ ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
|
||||
int size;
|
||||
int b;
|
||||
|
||||
size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
|
||||
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
|
||||
od = umem_alloc(size, UMEM_NOFAIL);
|
||||
batchsize = OD_ARRAY_SIZE;
|
||||
|
||||
@ -3621,7 +3621,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
|
||||
ztest_od_t *od;
|
||||
|
||||
objset_t *os = zd->zd_os;
|
||||
size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
|
||||
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
|
||||
od = umem_alloc(size, UMEM_NOFAIL);
|
||||
dmu_tx_t *tx;
|
||||
int i, freeit, error;
|
||||
@ -3911,7 +3911,7 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
|
||||
arc_buf_t **bigbuf_arcbufs;
|
||||
dmu_object_info_t doi;
|
||||
|
||||
size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
|
||||
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
|
||||
od = umem_alloc(size, UMEM_NOFAIL);
|
||||
|
||||
/*
|
||||
@ -4132,7 +4132,7 @@ ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
|
||||
{
|
||||
ztest_od_t *od;
|
||||
|
||||
od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
|
||||
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
|
||||
uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
|
||||
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
|
||||
|
||||
@ -4149,7 +4149,7 @@ ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
|
||||
while (ztest_random(10) != 0)
|
||||
ztest_io(zd, od->od_object, offset);
|
||||
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
}
|
||||
|
||||
void
|
||||
@ -4162,17 +4162,18 @@ ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
|
||||
uint64_t blocksize = ztest_random_blocksize();
|
||||
void *data;
|
||||
|
||||
od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
|
||||
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
|
||||
|
||||
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
|
||||
|
||||
if (ztest_object_init(zd, od, sizeof (ztest_od_t), !ztest_random(2)) != 0) {
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
|
||||
!ztest_random(2)) != 0) {
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
return;
|
||||
}
|
||||
|
||||
if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -4190,7 +4191,7 @@ ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
|
||||
}
|
||||
|
||||
umem_free(data, blocksize);
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -4215,7 +4216,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
|
||||
int error;
|
||||
char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
|
||||
|
||||
od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
|
||||
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
|
||||
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
|
||||
|
||||
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
|
||||
@ -4338,7 +4339,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
|
||||
VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
|
||||
dmu_tx_commit(tx);
|
||||
out:
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -4352,7 +4353,7 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id)
|
||||
uint64_t object, txg;
|
||||
int i;
|
||||
|
||||
od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
|
||||
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
|
||||
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
|
||||
|
||||
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
|
||||
@ -4385,7 +4386,7 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id)
|
||||
dmu_tx_commit(tx);
|
||||
}
|
||||
out:
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
}
|
||||
|
||||
/* ARGSUSED */
|
||||
@ -4401,11 +4402,11 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
|
||||
char name[20], string_value[20];
|
||||
void *data;
|
||||
|
||||
od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
|
||||
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
|
||||
ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
|
||||
|
||||
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -4499,7 +4500,7 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
|
||||
if (tx != NULL)
|
||||
dmu_tx_commit(tx);
|
||||
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -4590,11 +4591,11 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
|
||||
uint64_t old_txg, txg;
|
||||
int i, error = 0;
|
||||
|
||||
od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
|
||||
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
|
||||
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
|
||||
|
||||
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -4637,7 +4638,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
|
||||
umem_free(cb_data[i], sizeof (ztest_cb_data_t));
|
||||
}
|
||||
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -4709,7 +4710,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
|
||||
|
||||
dmu_tx_commit(tx);
|
||||
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
}
|
||||
|
||||
/* ARGSUSED */
|
||||
@ -4790,11 +4791,12 @@ ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
|
||||
|
||||
dmu_objset_name(os, osname);
|
||||
|
||||
(void) snprintf(snapname, sizeof (snapname), "sh1_%llu", (long long unsigned int)id);
|
||||
(void) snprintf(snapname, sizeof (snapname), "sh1_%llu",
|
||||
(u_longlong_t)id);
|
||||
(void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
|
||||
(void) snprintf(clonename, sizeof (clonename),
|
||||
"%s/ch1_%llu", osname, (long long unsigned int)id);
|
||||
(void) snprintf(tag, sizeof (tag), "tag_%llu", (long long unsigned int)id);
|
||||
"%s/ch1_%llu", osname, (u_longlong_t)id);
|
||||
(void) snprintf(tag, sizeof (tag), "tag_%llu", (u_longlong_t)id);
|
||||
|
||||
/*
|
||||
* Clean up from any previous run.
|
||||
@ -5124,11 +5126,11 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
|
||||
blocksize = ztest_random_blocksize();
|
||||
blocksize = MIN(blocksize, 2048); /* because we write so many */
|
||||
|
||||
od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
|
||||
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
|
||||
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
|
||||
|
||||
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -5143,7 +5145,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
|
||||
ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
|
||||
B_FALSE) != 0) {
|
||||
(void) rw_exit(&ztest_name_lock);
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -5158,7 +5160,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
|
||||
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
|
||||
if (txg == 0) {
|
||||
(void) rw_exit(&ztest_name_lock);
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -5207,7 +5209,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
|
||||
zio_buf_free(buf, psize);
|
||||
|
||||
(void) rw_exit(&ztest_name_lock);
|
||||
umem_free(od, sizeof(ztest_od_t));
|
||||
umem_free(od, sizeof (ztest_od_t));
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -33,7 +33,8 @@
|
||||
#include <sys/zfs_znode.h>
|
||||
#include <sys/fs/zfs.h>
|
||||
|
||||
int ioctl_get_msg(char *var, int fd)
|
||||
static int
|
||||
ioctl_get_msg(char *var, int fd)
|
||||
{
|
||||
int error = 0;
|
||||
char msg[ZFS_MAXNAMELEN];
|
||||
@ -47,7 +48,8 @@ int ioctl_get_msg(char *var, int fd)
|
||||
return (error);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
int
|
||||
main(int argc, char **argv)
|
||||
{
|
||||
int fd, error = 0;
|
||||
char zvol_name[ZFS_MAXNAMELEN], zvol_name_part[ZFS_MAXNAMELEN];
|
||||
|
@ -46,7 +46,7 @@ blk_fetch_request(struct request_queue *q)
|
||||
if (req)
|
||||
blkdev_dequeue_request(req);
|
||||
|
||||
return req;
|
||||
return (req);
|
||||
}
|
||||
#endif /* HAVE_BLK_FETCH_REQUEST */
|
||||
|
||||
@ -79,7 +79,7 @@ __blk_end_request(struct request *req, int error, unsigned int nr_bytes)
|
||||
req->hard_cur_sectors = nr_bytes >> 9;
|
||||
end_request(req, ((error == 0) ? 1 : error));
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
@ -92,17 +92,17 @@ blk_end_request(struct request *req, int error, unsigned int nr_bytes)
|
||||
rc = __blk_end_request(req, error, nr_bytes);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
#else
|
||||
# ifdef HAVE_BLK_END_REQUEST_GPL_ONLY
|
||||
#ifdef HAVE_BLK_END_REQUEST_GPL_ONLY
|
||||
/*
|
||||
* Define required to avoid conflicting 2.6.29 non-static prototype for a
|
||||
* GPL-only version of the helper. As of 2.6.31 the helper is available
|
||||
* to non-GPL modules and is not explicitly exported GPL-only.
|
||||
*/
|
||||
# define __blk_end_request __blk_end_request_x
|
||||
# define blk_end_request blk_end_request_x
|
||||
#define __blk_end_request __blk_end_request_x
|
||||
#define blk_end_request blk_end_request_x
|
||||
|
||||
static inline bool
|
||||
__blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
|
||||
@ -115,7 +115,7 @@ __blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
|
||||
req->hard_cur_sectors = nr_bytes >> 9;
|
||||
end_request(req, ((error == 0) ? 1 : error));
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
static inline bool
|
||||
blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
|
||||
@ -127,9 +127,9 @@ blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
|
||||
rc = __blk_end_request_x(req, error, nr_bytes);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
# endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
|
||||
#endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
|
||||
#endif /* HAVE_BLK_END_REQUEST */
|
||||
|
||||
/*
|
||||
@ -153,7 +153,7 @@ __blk_queue_flush(struct request_queue *q, unsigned int flags)
|
||||
static inline sector_t
|
||||
blk_rq_pos(struct request *req)
|
||||
{
|
||||
return req->sector;
|
||||
return (req->sector);
|
||||
}
|
||||
#endif /* HAVE_BLK_RQ_POS */
|
||||
|
||||
@ -161,7 +161,7 @@ blk_rq_pos(struct request *req)
|
||||
static inline unsigned int
|
||||
blk_rq_sectors(struct request *req)
|
||||
{
|
||||
return req->nr_sectors;
|
||||
return (req->nr_sectors);
|
||||
}
|
||||
#endif /* HAVE_BLK_RQ_SECTORS */
|
||||
|
||||
@ -175,7 +175,7 @@ blk_rq_sectors(struct request *req)
|
||||
static inline unsigned int
|
||||
__blk_rq_bytes(struct request *req)
|
||||
{
|
||||
return blk_rq_sectors(req) << 9;
|
||||
return (blk_rq_sectors(req) << 9);
|
||||
}
|
||||
#endif /* !HAVE_BLK_RQ_BYTES || HAVE_BLK_RQ_BYTES_GPL_ONLY */
|
||||
|
||||
@ -256,7 +256,7 @@ get_disk_ro(struct gendisk *disk)
|
||||
if (disk->part[0])
|
||||
policy = disk->part[0]->policy;
|
||||
|
||||
return policy;
|
||||
return (policy);
|
||||
}
|
||||
#endif /* HAVE_GET_DISK_RO */
|
||||
|
||||
@ -274,14 +274,14 @@ struct req_iterator {
|
||||
struct bio *bio;
|
||||
};
|
||||
|
||||
# define for_each_bio(_bio) \
|
||||
#define for_each_bio(_bio) \
|
||||
for (; _bio; _bio = _bio->bi_next)
|
||||
|
||||
# define __rq_for_each_bio(_bio, rq) \
|
||||
#define __rq_for_each_bio(_bio, rq) \
|
||||
if ((rq->bio)) \
|
||||
for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
|
||||
|
||||
# define rq_for_each_segment(bvl, _rq, _iter) \
|
||||
#define rq_for_each_segment(bvl, _rq, _iter) \
|
||||
__rq_for_each_bio(_iter.bio, _rq) \
|
||||
bio_for_each_segment(bvl, _iter.bio, _iter.i)
|
||||
#endif /* HAVE_RQ_FOR_EACH_SEGMENT */
|
||||
@ -315,21 +315,23 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
|
||||
|
||||
#ifdef HAVE_BIO_RW_FAILFAST_DTD
|
||||
/* BIO_RW_FAILFAST_* preferred interface from 2.6.28 - 2.6.35 */
|
||||
*flags |=
|
||||
((1 << BIO_RW_FAILFAST_DEV) |
|
||||
*flags |= (
|
||||
(1 << BIO_RW_FAILFAST_DEV) |
|
||||
(1 << BIO_RW_FAILFAST_TRANSPORT) |
|
||||
(1 << BIO_RW_FAILFAST_DRIVER));
|
||||
#else
|
||||
# ifdef HAVE_BIO_RW_FAILFAST
|
||||
#ifdef HAVE_BIO_RW_FAILFAST
|
||||
/* BIO_RW_FAILFAST preferred interface from 2.6.12 - 2.6.27 */
|
||||
*flags |= (1 << BIO_RW_FAILFAST);
|
||||
# else
|
||||
# ifdef HAVE_REQ_FAILFAST_MASK
|
||||
/* REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx,
|
||||
* the BIO_* and REQ_* flags were unified under REQ_* flags. */
|
||||
#else
|
||||
#ifdef HAVE_REQ_FAILFAST_MASK
|
||||
/*
|
||||
* REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx,
|
||||
* the BIO_* and REQ_* flags were unified under REQ_* flags.
|
||||
*/
|
||||
*flags |= REQ_FAILFAST_MASK;
|
||||
# endif /* HAVE_REQ_FAILFAST_MASK */
|
||||
# endif /* HAVE_BIO_RW_FAILFAST */
|
||||
#endif /* HAVE_REQ_FAILFAST_MASK */
|
||||
#endif /* HAVE_BIO_RW_FAILFAST */
|
||||
#endif /* HAVE_BIO_RW_FAILFAST_DTD */
|
||||
}
|
||||
|
||||
@ -346,12 +348,14 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
|
||||
* macro's to ensure the prototype and return value are handled.
|
||||
*/
|
||||
#ifdef HAVE_2ARGS_BIO_END_IO_T
|
||||
# define BIO_END_IO_PROTO(fn, x, y, z) static void fn(struct bio *x, int z)
|
||||
# define BIO_END_IO_RETURN(rc) return
|
||||
#define BIO_END_IO_PROTO(fn, x, y, z) static void fn(struct bio *x, int z)
|
||||
#define BIO_END_IO_RETURN(rc) return
|
||||
#else
|
||||
# define BIO_END_IO_PROTO(fn, x, y, z) static int fn(struct bio *x, \
|
||||
unsigned int y, int z)
|
||||
# define BIO_END_IO_RETURN(rc) return rc
|
||||
#define BIO_END_IO_PROTO(fn, x, y, z) static int fn( \
|
||||
struct bio *x, \
|
||||
unsigned int y, \
|
||||
int z)
|
||||
#define BIO_END_IO_RETURN(rc) return rc
|
||||
#endif /* HAVE_2ARGS_BIO_END_IO_T */
|
||||
|
||||
/*
|
||||
@ -370,15 +374,15 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
|
||||
* Used to exclusively open a block device from within the kernel.
|
||||
*/
|
||||
#if defined(HAVE_BLKDEV_GET_BY_PATH)
|
||||
# define vdev_bdev_open(path, md, hld) blkdev_get_by_path(path, \
|
||||
#define vdev_bdev_open(path, md, hld) blkdev_get_by_path(path, \
|
||||
(md) | FMODE_EXCL, hld)
|
||||
# define vdev_bdev_close(bdev, md) blkdev_put(bdev, (md) | FMODE_EXCL)
|
||||
#define vdev_bdev_close(bdev, md) blkdev_put(bdev, (md) | FMODE_EXCL)
|
||||
#elif defined(HAVE_OPEN_BDEV_EXCLUSIVE)
|
||||
# define vdev_bdev_open(path, md, hld) open_bdev_exclusive(path, md, hld)
|
||||
# define vdev_bdev_close(bdev, md) close_bdev_exclusive(bdev, md)
|
||||
#define vdev_bdev_open(path, md, hld) open_bdev_exclusive(path, md, hld)
|
||||
#define vdev_bdev_close(bdev, md) close_bdev_exclusive(bdev, md)
|
||||
#else
|
||||
# define vdev_bdev_open(path, md, hld) open_bdev_excl(path, md, hld)
|
||||
# define vdev_bdev_close(bdev, md) close_bdev_excl(bdev)
|
||||
#define vdev_bdev_open(path, md, hld) open_bdev_excl(path, md, hld)
|
||||
#define vdev_bdev_close(bdev, md) close_bdev_excl(bdev)
|
||||
#endif /* HAVE_BLKDEV_GET_BY_PATH | HAVE_OPEN_BDEV_EXCLUSIVE */
|
||||
|
||||
/*
|
||||
@ -387,9 +391,9 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
|
||||
* it was unused.
|
||||
*/
|
||||
#ifdef HAVE_1ARG_INVALIDATE_BDEV
|
||||
# define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev)
|
||||
#define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev)
|
||||
#else
|
||||
# define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev, 1)
|
||||
#define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev, 1)
|
||||
#endif /* HAVE_1ARG_INVALIDATE_BDEV */
|
||||
|
||||
/*
|
||||
@ -398,7 +402,7 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
|
||||
* symbol was not exported.
|
||||
*/
|
||||
#ifndef HAVE_LOOKUP_BDEV
|
||||
# define lookup_bdev(path) ERR_PTR(-ENOTSUP)
|
||||
#define lookup_bdev(path) ERR_PTR(-ENOTSUP)
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -416,13 +420,13 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
|
||||
* the logical block size interface and then the older hard sector size.
|
||||
*/
|
||||
#ifdef HAVE_BDEV_PHYSICAL_BLOCK_SIZE
|
||||
# define vdev_bdev_block_size(bdev) bdev_physical_block_size(bdev)
|
||||
#define vdev_bdev_block_size(bdev) bdev_physical_block_size(bdev)
|
||||
#else
|
||||
# ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE
|
||||
# define vdev_bdev_block_size(bdev) bdev_logical_block_size(bdev)
|
||||
# else
|
||||
# define vdev_bdev_block_size(bdev) bdev_hardsect_size(bdev)
|
||||
# endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */
|
||||
#ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE
|
||||
#define vdev_bdev_block_size(bdev) bdev_logical_block_size(bdev)
|
||||
#else
|
||||
#define vdev_bdev_block_size(bdev) bdev_hardsect_size(bdev)
|
||||
#endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */
|
||||
#endif /* HAVE_BDEV_PHYSICAL_BLOCK_SIZE */
|
||||
|
||||
/*
|
||||
@ -438,13 +442,13 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
|
||||
* compatibility macros.
|
||||
*/
|
||||
#ifdef WRITE_FLUSH_FUA
|
||||
# define VDEV_WRITE_FLUSH_FUA WRITE_FLUSH_FUA
|
||||
# define VDEV_REQ_FLUSH REQ_FLUSH
|
||||
# define VDEV_REQ_FUA REQ_FUA
|
||||
#define VDEV_WRITE_FLUSH_FUA WRITE_FLUSH_FUA
|
||||
#define VDEV_REQ_FLUSH REQ_FLUSH
|
||||
#define VDEV_REQ_FUA REQ_FUA
|
||||
#else
|
||||
# define VDEV_WRITE_FLUSH_FUA WRITE_BARRIER
|
||||
# define VDEV_REQ_FLUSH REQ_HARDBARRIER
|
||||
# define VDEV_REQ_FUA REQ_HARDBARRIER
|
||||
#define VDEV_WRITE_FLUSH_FUA WRITE_BARRIER
|
||||
#define VDEV_REQ_FLUSH REQ_HARDBARRIER
|
||||
#define VDEV_REQ_FUA REQ_HARDBARRIER
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -452,7 +456,7 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
|
||||
* Use the normal I/O patch for discards.
|
||||
*/
|
||||
#ifdef REQ_DISCARD
|
||||
# define VDEV_REQ_DISCARD REQ_DISCARD
|
||||
#define VDEV_REQ_DISCARD REQ_DISCARD
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -74,8 +74,8 @@ d_clear_d_op(struct dentry *dentry)
|
||||
{
|
||||
#ifdef HAVE_D_SET_D_OP
|
||||
dentry->d_op = NULL;
|
||||
dentry->d_flags &=
|
||||
~(DCACHE_OP_HASH | DCACHE_OP_COMPARE |
|
||||
dentry->d_flags &= ~(
|
||||
DCACHE_OP_HASH | DCACHE_OP_COMPARE |
|
||||
DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE);
|
||||
#endif /* HAVE_D_SET_D_OP */
|
||||
}
|
||||
|
@ -71,7 +71,10 @@ truncate_setsize(struct inode *ip, loff_t new)
|
||||
extern atomic_long_t zfs_bdi_seq;
|
||||
|
||||
static inline int
|
||||
bdi_setup_and_register(struct backing_dev_info *bdi,char *name,unsigned int cap)
|
||||
bdi_setup_and_register(
|
||||
struct backing_dev_info *bdi,
|
||||
char *name,
|
||||
unsigned int cap)
|
||||
{
|
||||
char tmp[32];
|
||||
int error;
|
||||
@ -154,8 +157,11 @@ typedef int zpl_umode_t;
|
||||
|
||||
#if defined(SEEK_HOLE) && defined(SEEK_DATA) && !defined(HAVE_LSEEK_EXECUTE)
|
||||
static inline loff_t
|
||||
lseek_execute(struct file *filp, struct inode *inode,
|
||||
loff_t offset, loff_t maxsize)
|
||||
lseek_execute(
|
||||
struct file *filp,
|
||||
struct inode *inode,
|
||||
loff_t offset,
|
||||
loff_t maxsize)
|
||||
{
|
||||
if (offset < 0 && !(filp->f_mode & FMODE_UNSIGNED_OFFSET))
|
||||
return (-EINVAL);
|
||||
@ -224,14 +230,14 @@ zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer) {
|
||||
if ((newer != ACL_NOT_CACHED) && (newer != NULL))
|
||||
posix_acl_dup(newer);
|
||||
|
||||
switch(type) {
|
||||
switch (type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
older = ip->i_acl;
|
||||
rcu_assign_pointer(ip->i_acl,newer);
|
||||
rcu_assign_pointer(ip->i_acl, newer);
|
||||
break;
|
||||
case ACL_TYPE_DEFAULT:
|
||||
older = ip->i_default_acl;
|
||||
rcu_assign_pointer(ip->i_default_acl,newer);
|
||||
rcu_assign_pointer(ip->i_default_acl, newer);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -280,7 +286,7 @@ posix_acl_chmod(struct posix_acl **acl, int flags, umode_t umode) {
|
||||
}
|
||||
|
||||
static inline int
|
||||
posix_acl_create(struct posix_acl** acl, int flags, umode_t* umodep) {
|
||||
posix_acl_create(struct posix_acl **acl, int flags, umode_t *umodep) {
|
||||
struct posix_acl *oldacl = *acl;
|
||||
mode_t mode = *umodep;
|
||||
int error;
|
||||
|
@ -52,14 +52,14 @@ static int \
|
||||
fn(struct dentry *dentry, const char *name, void *buffer, size_t size, \
|
||||
int unused_handler_flags) \
|
||||
{ \
|
||||
return __ ## fn(dentry->d_inode, name, buffer, size); \
|
||||
return (__ ## fn(dentry->d_inode, name, buffer, size)); \
|
||||
}
|
||||
#else
|
||||
#define ZPL_XATTR_GET_WRAPPER(fn) \
|
||||
static int \
|
||||
fn(struct inode *ip, const char *name, void *buffer, size_t size) \
|
||||
{ \
|
||||
return __ ## fn(ip, name, buffer, size); \
|
||||
return (__ ## fn(ip, name, buffer, size)); \
|
||||
}
|
||||
#endif /* HAVE_DENTRY_XATTR_GET */
|
||||
|
||||
@ -74,7 +74,7 @@ static int \
|
||||
fn(struct dentry *dentry, const char *name, const void *buffer, \
|
||||
size_t size, int flags, int unused_handler_flags) \
|
||||
{ \
|
||||
return __ ## fn(dentry->d_inode, name, buffer, size, flags); \
|
||||
return (__ ## fn(dentry->d_inode, name, buffer, size, flags)); \
|
||||
}
|
||||
#else
|
||||
#define ZPL_XATTR_SET_WRAPPER(fn) \
|
||||
@ -82,7 +82,7 @@ static int \
|
||||
fn(struct inode *ip, const char *name, const void *buffer, \
|
||||
size_t size, int flags) \
|
||||
{ \
|
||||
return __ ## fn(ip, name, buffer, size, flags); \
|
||||
return (__ ## fn(ip, name, buffer, size, flags)); \
|
||||
}
|
||||
#endif /* HAVE_DENTRY_XATTR_SET */
|
||||
|
||||
@ -103,27 +103,27 @@ fn(struct inode *ip, const char *name, const void *buffer, \
|
||||
static inline struct posix_acl *
|
||||
zpl_acl_from_xattr(const void *value, int size)
|
||||
{
|
||||
return posix_acl_from_xattr(CRED()->user_ns, value, size);
|
||||
return (posix_acl_from_xattr(CRED()->user_ns, value, size));
|
||||
}
|
||||
|
||||
static inline int
|
||||
zpl_acl_to_xattr(struct posix_acl *acl, void *value, int size)
|
||||
{
|
||||
return posix_acl_to_xattr(CRED()->user_ns,acl, value, size);
|
||||
return (posix_acl_to_xattr(CRED()->user_ns, acl, value, size));
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static inline struct posix_acl *
|
||||
zpl_acl_from_xattr(const void *value,int size)
|
||||
zpl_acl_from_xattr(const void *value, int size)
|
||||
{
|
||||
return posix_acl_from_xattr(value, size);
|
||||
return (posix_acl_from_xattr(value, size));
|
||||
}
|
||||
|
||||
static inline int
|
||||
zpl_acl_to_xattr(struct posix_acl *acl, void *value, int size)
|
||||
{
|
||||
return posix_acl_to_xattr(acl, value, size);
|
||||
return (posix_acl_to_xattr(acl, value, size));
|
||||
}
|
||||
#endif /* HAVE_POSIX_ACL_FROM_XATTR_USERNS */
|
||||
|
||||
|
@ -859,7 +859,7 @@ typedef enum zfs_ioc {
|
||||
/*
|
||||
* zvol ioctl to get dataset name
|
||||
*/
|
||||
#define BLKZNAME _IOR(0x12,125,char[ZFS_MAXNAMELEN])
|
||||
#define BLKZNAME _IOR(0x12, 125, char[ZFS_MAXNAMELEN])
|
||||
|
||||
/*
|
||||
* Internal SPA load state. Used by FMA diagnosis engine.
|
||||
|
@ -70,7 +70,7 @@ typedef struct zfs_sb {
|
||||
krwlock_t z_teardown_inactive_lock;
|
||||
list_t z_all_znodes; /* all znodes in the fs */
|
||||
uint64_t z_nr_znodes; /* number of znodes in the fs */
|
||||
unsigned long z_rollback_time;/* last online rollback time */
|
||||
unsigned long z_rollback_time; /* last online rollback time */
|
||||
kmutex_t z_znodes_lock; /* lock for z_all_znodes */
|
||||
struct inode *z_ctldir; /* .zfs directory inode */
|
||||
avl_tree_t z_ctldir_snaps; /* .zfs/snapshot entries */
|
||||
|
@ -210,8 +210,8 @@ typedef struct znode {
|
||||
kmutex_t z_acl_lock; /* acl data lock */
|
||||
zfs_acl_t *z_acl_cached; /* cached acl */
|
||||
krwlock_t z_xattr_lock; /* xattr data lock */
|
||||
nvlist_t *z_xattr_cached;/* cached xattrs */
|
||||
struct znode *z_xattr_parent;/* xattr parent znode */
|
||||
nvlist_t *z_xattr_cached; /* cached xattrs */
|
||||
struct znode *z_xattr_parent; /* xattr parent znode */
|
||||
list_node_t z_link_node; /* all znodes in fs link */
|
||||
sa_handle_t *z_sa_hdl; /* handle to sa data */
|
||||
boolean_t z_is_sa; /* are we native sa? */
|
||||
|
@ -77,7 +77,7 @@ extern int zpl_set_acl(struct inode *ip, int type, struct posix_acl *acl);
|
||||
extern struct posix_acl *zpl_get_acl(struct inode *ip, int type);
|
||||
#if !defined(HAVE_GET_ACL)
|
||||
#if defined(HAVE_CHECK_ACL_WITH_FLAGS)
|
||||
extern int zpl_check_acl(struct inode *inode, int mask,unsigned int flags);
|
||||
extern int zpl_check_acl(struct inode *inode, int mask, unsigned int flags);
|
||||
#elif defined(HAVE_CHECK_ACL)
|
||||
extern int zpl_check_acl(struct inode *inode, int mask);
|
||||
#elif defined(HAVE_PERMISSION_WITH_NAMEIDATA)
|
||||
@ -145,21 +145,22 @@ static inline bool
dir_emit(struct dir_context *ctx, const char *name, int namelen,
uint64_t ino, unsigned type)
{
return ctx->actor(ctx->dirent, name, namelen, ctx->pos, ino, type) == 0;
return (ctx->actor(ctx->dirent, name, namelen, ctx->pos, ino, type)
== 0);
}

static inline bool
dir_emit_dot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx->dirent, ".", 1, ctx->pos,
file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
return (ctx->actor(ctx->dirent, ".", 1, ctx->pos,
file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0);
}

static inline bool
dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx->dirent, "..", 2, ctx->pos,
parent_ino(file->f_path.dentry), DT_DIR) == 0;
return (ctx->actor(ctx->dirent, "..", 2, ctx->pos,
parent_ino(file->f_path.dentry), DT_DIR) == 0);
}

static inline bool
@ -167,15 +168,15 @@ dir_emit_dots(struct file *file, struct dir_context *ctx)
{
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
return false;
return (false);
ctx->pos = 1;
}
if (ctx->pos == 1) {
if (!dir_emit_dotdot(file, ctx))
return false;
return (false);
ctx->pos = 2;
}
return true;
return (true);
}
#endif /* HAVE_VFS_ITERATE */
@ -1,4 +1,4 @@
|
||||
/*****************************************************************************\
|
||||
/*
|
||||
* ZPIOS is a heavily modified version of the original PIOS test code.
|
||||
* It is designed to have the test code running in the Linux kernel
|
||||
* against ZFS while still being flexibly controled from user space.
|
||||
@ -29,12 +29,13 @@
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
|
||||
\*****************************************************************************/
|
||||
*/
|
||||
|
||||
#ifndef _ZPIOS_CTL_H
|
||||
#define _ZPIOS_CTL_H
|
||||
|
||||
/* Contains shared definitions which both the userspace
|
||||
/*
|
||||
* Contains shared definitions which both the userspace
|
||||
* and kernelspace portions of zpios must agree on.
|
||||
*/
|
||||
#ifndef _KERNEL
|
||||
@ -137,7 +138,8 @@ typedef struct zpios_cmd {
|
||||
#endif
|
||||
|
||||
static inline
|
||||
void zpios_timespec_normalize(zpios_timespec_t *ts, uint32_t sec, uint32_t nsec)
|
||||
void
|
||||
zpios_timespec_normalize(zpios_timespec_t *ts, uint32_t sec, uint32_t nsec)
|
||||
{
|
||||
while (nsec >= NSEC_PER_SEC) {
|
||||
nsec -= NSEC_PER_SEC;
|
||||
@ -152,27 +154,30 @@ void zpios_timespec_normalize(zpios_timespec_t *ts, uint32_t sec, uint32_t nsec)
|
||||
}
|
||||
|
||||
static inline
|
||||
zpios_timespec_t zpios_timespec_add(zpios_timespec_t lhs, zpios_timespec_t rhs)
|
||||
zpios_timespec_t
|
||||
zpios_timespec_add(zpios_timespec_t lhs, zpios_timespec_t rhs)
|
||||
{
|
||||
zpios_timespec_t ts_delta;
|
||||
zpios_timespec_normalize(&ts_delta, lhs.ts_sec + rhs.ts_sec,
|
||||
lhs.ts_nsec + rhs.ts_nsec);
|
||||
return ts_delta;
|
||||
return (ts_delta);
|
||||
}
|
||||
|
||||
static inline
|
||||
zpios_timespec_t zpios_timespec_sub(zpios_timespec_t lhs, zpios_timespec_t rhs)
|
||||
zpios_timespec_t
|
||||
zpios_timespec_sub(zpios_timespec_t lhs, zpios_timespec_t rhs)
|
||||
{
|
||||
zpios_timespec_t ts_delta;
|
||||
zpios_timespec_normalize(&ts_delta, lhs.ts_sec - rhs.ts_sec,
|
||||
lhs.ts_nsec - rhs.ts_nsec);
|
||||
return ts_delta;
|
||||
return (ts_delta);
|
||||
}
|
||||
|
||||
#ifdef _KERNEL
|
||||
|
||||
static inline
|
||||
zpios_timespec_t zpios_timespec_now(void)
|
||||
zpios_timespec_t
|
||||
zpios_timespec_now(void)
|
||||
{
|
||||
zpios_timespec_t zts_now;
|
||||
struct timespec ts_now;
|
||||
@ -181,15 +186,17 @@ zpios_timespec_t zpios_timespec_now(void)
|
||||
zts_now.ts_sec = ts_now.tv_sec;
|
||||
zts_now.ts_nsec = ts_now.tv_nsec;
|
||||
|
||||
return zts_now;
|
||||
return (zts_now);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static inline
|
||||
double zpios_timespec_to_double(zpios_timespec_t ts)
|
||||
double
|
||||
zpios_timespec_to_double(zpios_timespec_t ts)
|
||||
{
|
||||
return ((double)(ts.ts_sec) +
|
||||
return
|
||||
((double)(ts.ts_sec) +
|
||||
((double)(ts.ts_nsec) / (double)(NSEC_PER_SEC)));
|
||||
}
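The zpios_timespec helpers above live in a zpios-only header, so the following is a standalone restatement of the same arithmetic; the zts_t type and helper names are local stand-ins, and only the nanosecond carry loop visible in the hunk is mirrored:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000

typedef struct zts {
	uint32_t ts_sec;
	uint32_t ts_nsec;
} zts_t;

/* Carry excess nanoseconds into seconds, as zpios_timespec_normalize() does. */
static void
zts_normalize(zts_t *ts, uint32_t sec, uint32_t nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		sec++;
	}
	ts->ts_sec = sec;
	ts->ts_nsec = nsec;
}

/* Sum two timestamps and normalize the result, as zpios_timespec_add() does. */
static zts_t
zts_add(zts_t lhs, zts_t rhs)
{
	zts_t sum;

	zts_normalize(&sum, lhs.ts_sec + rhs.ts_sec,
	    lhs.ts_nsec + rhs.ts_nsec);
	return (sum);
}

int
main(void)
{
	zts_t a = { 1, 700000000 };
	zts_t b = { 2, 600000000 };
	zts_t c = zts_add(a, b);

	/* Prints 4.300000000: the 1.3s worth of nanoseconds is carried over. */
	(void) printf("%u.%09u\n", (unsigned)c.ts_sec, (unsigned)c.ts_nsec);
	return (0);
}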
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/*****************************************************************************\
|
||||
/*
|
||||
* ZPIOS is a heavily modified version of the original PIOS test code.
|
||||
* It is designed to have the test code running in the Linux kernel
|
||||
* against ZFS while still being flexibly controled from user space.
|
||||
@ -29,7 +29,7 @@
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
|
||||
\*****************************************************************************/
|
||||
*/
|
||||
|
||||
#ifndef _ZPIOS_INTERNAL_H
|
||||
#define _ZPIOS_INTERNAL_H
|
||||
@ -109,30 +109,4 @@ typedef struct zpios_info {
|
||||
char *info_head; /* Internal kernel use only */
|
||||
} zpios_info_t;
|
||||
|
||||
#define zpios_print(file, format, args...) \
|
||||
({ zpios_info_t *_info_ = (zpios_info_t *)file->private_data; \
|
||||
int _rc_; \
|
||||
\
|
||||
ASSERT(_info_); \
|
||||
ASSERT(_info_->info_buffer); \
|
||||
\
|
||||
spin_lock(&_info_->info_lock); \
|
||||
\
|
||||
/* Don't allow the kernel to start a write in the red zone */ \
|
||||
if ((int)(_info_->info_head - _info_->info_buffer) > \
|
||||
(_info_->info_size - ZPIOS_INFO_BUFFER_REDZONE)) { \
|
||||
_rc_ = -EOVERFLOW; \
|
||||
} else { \
|
||||
_rc_ = sprintf(_info_->info_head, format, args); \
|
||||
if (_rc_ >= 0) \
|
||||
_info_->info_head += _rc_; \
|
||||
} \
|
||||
\
|
||||
spin_unlock(&_info_->info_lock); \
|
||||
_rc_; \
|
||||
})
|
||||
|
||||
#define zpios_vprint(file, test, format, args...) \
|
||||
zpios_print(file, "%*s: " format, ZPIOS_NAME_SIZE, test, args)
|
||||
|
||||
#endif /* _ZPIOS_INTERNAL_H */
|
||||
|
@ -152,7 +152,7 @@ efi_get_info(int fd, struct dk_cinfo *dki_info)
|
||||
char *dev_path;
|
||||
int rval = 0;
|
||||
|
||||
memset(dki_info, 0, sizeof(*dki_info));
|
||||
memset(dki_info, 0, sizeof (*dki_info));
|
||||
|
||||
path = calloc(PATH_MAX, 1);
|
||||
if (path == NULL)
|
||||
@ -395,10 +395,10 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
*/
|
||||
if (read_disk_info(fd, &capacity, &lbsize) == -1) {
|
||||
if (efi_debug)
|
||||
fprintf(stderr,"unable to read disk info: %d",errno);
|
||||
fprintf(stderr, "unable to read disk info: %d", errno);
|
||||
|
||||
errno = EIO;
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
|
||||
switch (cmd) {
|
||||
@ -416,7 +416,7 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
if (efi_debug)
|
||||
(void) fprintf(stderr, "DKIOCGETEFI lseek "
|
||||
"error: %d\n", errno);
|
||||
return error;
|
||||
return (error);
|
||||
}
|
||||
|
||||
error = read(fd, data, dk_ioc->dki_length);
|
||||
@ -424,7 +424,7 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
if (efi_debug)
|
||||
(void) fprintf(stderr, "DKIOCGETEFI read "
|
||||
"error: %d\n", errno);
|
||||
return error;
|
||||
return (error);
|
||||
}
|
||||
|
||||
if (error != dk_ioc->dki_length) {
|
||||
@ -432,7 +432,7 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
(void) fprintf(stderr, "DKIOCGETEFI short "
|
||||
"read of %d bytes\n", error);
|
||||
errno = EIO;
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
error = 0;
|
||||
break;
|
||||
@ -443,7 +443,7 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
(void) fprintf(stderr, "DKIOCSETEFI unknown "
|
||||
"LBA size\n");
|
||||
errno = EIO;
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
|
||||
error = lseek(fd, dk_ioc->dki_lba * lbsize, SEEK_SET);
|
||||
@ -451,7 +451,7 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
if (efi_debug)
|
||||
(void) fprintf(stderr, "DKIOCSETEFI lseek "
|
||||
"error: %d\n", errno);
|
||||
return error;
|
||||
return (error);
|
||||
}
|
||||
|
||||
error = write(fd, data, dk_ioc->dki_length);
|
||||
@ -459,7 +459,7 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
if (efi_debug)
|
||||
(void) fprintf(stderr, "DKIOCSETEFI write "
|
||||
"error: %d\n", errno);
|
||||
return error;
|
||||
return (error);
|
||||
}
|
||||
|
||||
if (error != dk_ioc->dki_length) {
|
||||
@ -467,17 +467,17 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
(void) fprintf(stderr, "DKIOCSETEFI short "
|
||||
"write of %d bytes\n", error);
|
||||
errno = EIO;
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
|
||||
/* Sync the new EFI table to disk */
|
||||
error = fsync(fd);
|
||||
if (error == -1)
|
||||
return error;
|
||||
return (error);
|
||||
|
||||
/* Ensure any local disk cache is also flushed */
|
||||
if (ioctl(fd, BLKFLSBUF, 0) == -1)
|
||||
return error;
|
||||
return (error);
|
||||
|
||||
error = 0;
|
||||
break;
|
||||
@ -487,7 +487,7 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
(void) fprintf(stderr, "unsupported ioctl()\n");
|
||||
|
||||
errno = EIO;
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
#else
|
||||
dk_ioc->dki_data_64 = (uint64_t)(uintptr_t)data;
|
||||
@ -497,7 +497,8 @@ efi_ioctl(int fd, int cmd, dk_efi_t *dk_ioc)
|
||||
return (error);
|
||||
}
|
||||
|
||||
int efi_rescan(int fd)
|
||||
int
|
||||
efi_rescan(int fd)
|
||||
{
|
||||
#if defined(__linux__)
|
||||
int retry = 5;
|
||||
@ -548,7 +549,7 @@ check_label(int fd, dk_efi_t *dk_ioc)
|
||||
efi->efi_gpt_HeaderCRC32 = 0;
|
||||
len_t headerSize = (len_t)LE_32(efi->efi_gpt_HeaderSize);
|
||||
|
||||
if(headerSize < EFI_MIN_LABEL_SIZE || headerSize > EFI_LABEL_SIZE) {
|
||||
if (headerSize < EFI_MIN_LABEL_SIZE || headerSize > EFI_LABEL_SIZE) {
|
||||
if (efi_debug)
|
||||
(void) fprintf(stderr,
|
||||
"Invalid EFI HeaderSize %llu. Assuming %d.\n",
|
||||
@ -590,7 +591,7 @@ efi_read(int fd, struct dk_gpt *vtoc)
|
||||
* get the partition number for this file descriptor.
|
||||
*/
|
||||
if ((rval = efi_get_info(fd, &dki_info)) != 0)
|
||||
return rval;
|
||||
return (rval);
|
||||
|
||||
if ((strncmp(dki_info.dki_cname, "pseudo", 7) == 0) &&
|
||||
(strncmp(dki_info.dki_dname, "md", 3) == 0)) {
|
||||
@ -1117,7 +1118,7 @@ efi_write(int fd, struct dk_gpt *vtoc)
|
||||
diskaddr_t lba_backup_gpt_hdr;
|
||||
|
||||
if ((rval = efi_get_info(fd, &dki_info)) != 0)
|
||||
return rval;
|
||||
return (rval);
|
||||
|
||||
/* check if we are dealing wih a metadevice */
|
||||
if ((strncmp(dki_info.dki_cname, "pseudo", 7) == 0) &&
|
||||
|
@ -64,7 +64,7 @@ register_fstype(const char *name, const sa_share_ops_t *ops)
|
||||
fstype = calloc(sizeof (sa_fstype_t), 1);
|
||||
|
||||
if (fstype == NULL)
|
||||
return NULL;
|
||||
return (NULL);
|
||||
|
||||
fstype->name = name;
|
||||
fstype->ops = ops;
|
||||
@ -75,7 +75,7 @@ register_fstype(const char *name, const sa_share_ops_t *ops)
|
||||
fstype->next = fstypes;
|
||||
fstypes = fstype;
|
||||
|
||||
return fstype;
|
||||
return (fstype);
|
||||
}
|
||||
|
||||
sa_handle_t
|
||||
@ -86,7 +86,7 @@ sa_init(int init_service)
|
||||
impl_handle = calloc(sizeof (struct sa_handle_impl), 1);
|
||||
|
||||
if (impl_handle == NULL)
|
||||
return NULL;
|
||||
return (NULL);
|
||||
|
||||
impl_handle->zfs_libhandle = libzfs_init();
|
||||
|
||||
@ -243,30 +243,30 @@ update_zfs_shares_cb(zfs_handle_t *zhp, void *pcookie)
|
||||
if (type == ZFS_TYPE_FILESYSTEM &&
|
||||
zfs_iter_filesystems(zhp, update_zfs_shares_cb, pcookie) != 0) {
|
||||
zfs_close(zhp);
|
||||
return 1;
|
||||
return (1);
|
||||
}
|
||||
|
||||
if (type != ZFS_TYPE_FILESYSTEM) {
|
||||
zfs_close(zhp);
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
|
||||
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) != 0) {
|
||||
zfs_close(zhp);
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
dataset = (char *)zfs_get_name(zhp);
|
||||
|
||||
if (dataset == NULL) {
|
||||
zfs_close(zhp);
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
if (!zfs_is_mounted(zhp, NULL)) {
|
||||
zfs_close(zhp);
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
if ((udata->proto == NULL || strcmp(udata->proto, "nfs") == 0) &&
|
||||
@ -287,7 +287,7 @@ update_zfs_shares_cb(zfs_handle_t *zhp, void *pcookie)
|
||||
|
||||
zfs_close(zhp);
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -298,7 +298,7 @@ update_zfs_share(sa_share_impl_t impl_share, const char *proto)
|
||||
update_cookie_t udata;
|
||||
|
||||
if (impl_handle->zfs_libhandle == NULL)
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
|
||||
assert(impl_share->dataset != NULL);
|
||||
|
||||
@ -306,13 +306,13 @@ update_zfs_share(sa_share_impl_t impl_share, const char *proto)
|
||||
ZFS_TYPE_FILESYSTEM);
|
||||
|
||||
if (zhp == NULL)
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
|
||||
udata.handle = impl_handle;
|
||||
udata.proto = proto;
|
||||
(void) update_zfs_shares_cb(zhp, &udata);
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -321,14 +321,14 @@ update_zfs_shares(sa_handle_impl_t impl_handle, const char *proto)
|
||||
update_cookie_t udata;
|
||||
|
||||
if (impl_handle->zfs_libhandle == NULL)
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
|
||||
udata.handle = impl_handle;
|
||||
udata.proto = proto;
|
||||
(void) zfs_iter_root(impl_handle->zfs_libhandle, update_zfs_shares_cb,
|
||||
&udata);
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -351,7 +351,7 @@ process_share(sa_handle_impl_t impl_handle, sa_share_impl_t impl_share,
|
||||
if (impl_share == NULL) {
|
||||
if (lstat(pathname, &statbuf) != 0 ||
|
||||
!S_ISDIR(statbuf.st_mode))
|
||||
return SA_BAD_PATH;
|
||||
return (SA_BAD_PATH);
|
||||
|
||||
impl_share = alloc_share(pathname);
|
||||
|
||||
@ -421,7 +421,7 @@ err:
|
||||
free_share(impl_share);
|
||||
}
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
void
|
||||
@ -487,13 +487,13 @@ find_share(sa_handle_impl_t impl_handle, const char *sharepath)
|
||||
impl_share = impl_share->next;
|
||||
}
|
||||
|
||||
return impl_share;
|
||||
return (impl_share);
|
||||
}
|
||||
|
||||
sa_share_t
|
||||
sa_find_share(sa_handle_t handle, char *sharepath)
|
||||
{
|
||||
return (sa_share_t)find_share((sa_handle_impl_t)handle, sharepath);
|
||||
return ((sa_share_t)find_share((sa_handle_impl_t)handle, sharepath));
|
||||
}
|
||||
|
||||
int
|
||||
@ -715,16 +715,16 @@ sa_parse_legacy_options(sa_group_t group, char *options, char *proto)
|
||||
continue;
|
||||
}
|
||||
|
||||
return fstype->ops->validate_shareopts(options);
|
||||
return (fstype->ops->validate_shareopts(options));
|
||||
}
|
||||
|
||||
return SA_INVALID_PROTOCOL;
|
||||
return (SA_INVALID_PROTOCOL);
|
||||
}
|
||||
|
||||
boolean_t
|
||||
sa_needs_refresh(sa_handle_t handle)
|
||||
{
|
||||
return B_TRUE;
|
||||
return (B_TRUE);
|
||||
}
|
||||
|
||||
libzfs_handle_t *
|
||||
@ -733,9 +733,9 @@ sa_get_zfs_handle(sa_handle_t handle)
|
||||
sa_handle_impl_t impl_handle = (sa_handle_impl_t)handle;
|
||||
|
||||
if (impl_handle == NULL)
|
||||
return NULL;
|
||||
return (NULL);
|
||||
|
||||
return impl_handle->zfs_libhandle;
|
||||
return (impl_handle->zfs_libhandle);
|
||||
}
|
||||
|
||||
static sa_share_impl_t
|
||||
@ -746,13 +746,13 @@ alloc_share(const char *sharepath)
|
||||
impl_share = calloc(sizeof (struct sa_share_impl), 1);
|
||||
|
||||
if (impl_share == NULL)
|
||||
return NULL;
|
||||
return (NULL);
|
||||
|
||||
impl_share->sharepath = strdup(sharepath);
|
||||
|
||||
if (impl_share->sharepath == NULL) {
|
||||
free(impl_share);
|
||||
return NULL;
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
impl_share->fsinfo = calloc(sizeof (sa_share_fsinfo_t), fstypes_count);
|
||||
@ -760,10 +760,10 @@ alloc_share(const char *sharepath)
|
||||
if (impl_share->fsinfo == NULL) {
|
||||
free(impl_share->sharepath);
|
||||
free(impl_share);
|
||||
return NULL;
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
return impl_share;
|
||||
return (impl_share);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -799,8 +799,8 @@ sa_zfs_process_share(sa_handle_t handle, sa_group_t group, sa_share_t share,
|
||||
shareopts, sourcestr, dataset);
|
||||
#endif
|
||||
|
||||
return process_share(impl_handle, impl_share, mountpoint, NULL,
|
||||
proto, shareopts, NULL, dataset, B_FALSE);
|
||||
return (process_share(impl_handle, impl_share, mountpoint, NULL,
|
||||
proto, shareopts, NULL, dataset, B_FALSE));
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -50,7 +50,7 @@ typedef int (*nfs_shareopt_callback_t)(const char *opt, const char *value,
|
||||
typedef int (*nfs_host_callback_t)(const char *sharepath, const char *host,
|
||||
const char *security, const char *access, void *cookie);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Invokes the specified callback function for each Solaris share option
|
||||
* listed in the specified string.
|
||||
*/
|
||||
@ -62,12 +62,12 @@ foreach_nfs_shareopt(const char *shareopts,
|
||||
int was_nul, rc;
|
||||
|
||||
if (shareopts == NULL)
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
|
||||
shareopts_dup = strdup(shareopts);
|
||||
|
||||
if (shareopts_dup == NULL)
|
||||
return SA_NO_MEMORY;
|
||||
return (SA_NO_MEMORY);
|
||||
|
||||
opt = shareopts_dup;
|
||||
was_nul = 0;
|
||||
@ -95,7 +95,7 @@ foreach_nfs_shareopt(const char *shareopts,
|
||||
|
||||
if (rc != SA_OK) {
|
||||
free(shareopts_dup);
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
}
|
||||
|
||||
@ -107,7 +107,7 @@ foreach_nfs_shareopt(const char *shareopts,
|
||||
|
||||
free(shareopts_dup);
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
typedef struct nfs_host_cookie_s {
|
||||
@ -117,7 +117,7 @@ typedef struct nfs_host_cookie_s {
|
||||
const char *security;
|
||||
} nfs_host_cookie_t;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function for foreach_nfs_host. This function checks whether the
|
||||
* current share option is a host specification and invokes a callback
|
||||
* function with information about the host.
|
||||
@ -146,7 +146,7 @@ foreach_nfs_host_cb(const char *opt, const char *value, void *pcookie)
|
||||
host_dup = strdup(value);
|
||||
|
||||
if (host_dup == NULL)
|
||||
return SA_NO_MEMORY;
|
||||
return (SA_NO_MEMORY);
|
||||
|
||||
host = host_dup;
|
||||
|
||||
@ -163,7 +163,7 @@ foreach_nfs_host_cb(const char *opt, const char *value, void *pcookie)
|
||||
if (rc != SA_OK) {
|
||||
free(host_dup);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
host = next;
|
||||
@ -172,10 +172,10 @@ foreach_nfs_host_cb(const char *opt, const char *value, void *pcookie)
|
||||
free(host_dup);
|
||||
}
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Invokes a callback function for all NFS hosts that are set for a share.
|
||||
*/
|
||||
static int
|
||||
@ -196,7 +196,7 @@ foreach_nfs_host(sa_share_impl_t impl_share, nfs_host_callback_t callback,
|
||||
&udata);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Converts a Solaris NFS host specification to its Linux equivalent.
|
||||
*/
|
||||
static int
|
||||
@ -217,13 +217,13 @@ get_linux_hostspec(const char *solaris_hostspec, char **plinux_hostspec)
|
||||
}
|
||||
|
||||
if (*plinux_hostspec == NULL) {
|
||||
return SA_NO_MEMORY;
|
||||
return (SA_NO_MEMORY);
|
||||
}
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Used internally by nfs_enable_share to enable sharing for a single host.
|
||||
*/
|
||||
static int
|
||||
@ -281,12 +281,12 @@ nfs_enable_share_one(const char *sharepath, const char *host,
|
||||
free(opts);
|
||||
|
||||
if (rc < 0)
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
else
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Adds a Linux share option to an array of NFS options.
|
||||
*/
|
||||
static int
|
||||
@ -302,7 +302,7 @@ add_linux_shareopt(char **plinux_opts, const char *key, const char *value)
|
||||
(value ? 1 + strlen(value) : 0) + 1);
|
||||
|
||||
if (new_linux_opts == NULL)
|
||||
return SA_NO_MEMORY;
|
||||
return (SA_NO_MEMORY);
|
||||
|
||||
new_linux_opts[len] = '\0';
|
||||
|
||||
@ -318,10 +318,10 @@ add_linux_shareopt(char **plinux_opts, const char *key, const char *value)
|
||||
|
||||
*plinux_opts = new_linux_opts;
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Validates and converts a single Solaris share option to its Linux
|
||||
* equivalent.
|
||||
*/
|
||||
@ -333,7 +333,7 @@ get_linux_shareopts_cb(const char *key, const char *value, void *cookie)
|
||||
/* host-specific options, these are taken care of elsewhere */
|
||||
if (strcmp(key, "ro") == 0 || strcmp(key, "rw") == 0 ||
|
||||
strcmp(key, "sec") == 0)
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
|
||||
if (strcmp(key, "anon") == 0)
|
||||
key = "anonuid";
|
||||
@ -364,15 +364,15 @@ get_linux_shareopts_cb(const char *key, const char *value, void *cookie)
|
||||
strcmp(key, "all_squash") != 0 &&
|
||||
strcmp(key, "no_all_squash") != 0 && strcmp(key, "fsid") != 0 &&
|
||||
strcmp(key, "anonuid") != 0 && strcmp(key, "anongid") != 0) {
|
||||
return SA_SYNTAX_ERR;
|
||||
return (SA_SYNTAX_ERR);
|
||||
}
|
||||
|
||||
(void) add_linux_shareopt(plinux_opts, key, value);
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Takes a string containing Solaris share options (e.g. "sync,no_acl") and
|
||||
* converts them to a NULL-terminated array of Linux NFS options.
|
||||
*/
|
||||
@ -390,17 +390,18 @@ get_linux_shareopts(const char *shareopts, char **plinux_opts)
|
||||
(void) add_linux_shareopt(plinux_opts, "no_root_squash", NULL);
|
||||
(void) add_linux_shareopt(plinux_opts, "mountpoint", NULL);
|
||||
|
||||
rc = foreach_nfs_shareopt(shareopts, get_linux_shareopts_cb, plinux_opts);
|
||||
rc = foreach_nfs_shareopt(shareopts, get_linux_shareopts_cb,
|
||||
plinux_opts);
|
||||
|
||||
if (rc != SA_OK) {
|
||||
free(*plinux_opts);
|
||||
*plinux_opts = NULL;
|
||||
}
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Enables NFS sharing for the specified share.
|
||||
*/
|
||||
static int
|
||||
@ -410,27 +411,27 @@ nfs_enable_share(sa_share_impl_t impl_share)
|
||||
int rc;
|
||||
|
||||
if (!nfs_available()) {
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
}
|
||||
|
||||
shareopts = FSINFO(impl_share, nfs_fstype)->shareopts;
|
||||
|
||||
if (shareopts == NULL)
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
|
||||
rc = get_linux_shareopts(shareopts, &linux_opts);
|
||||
|
||||
if (rc != SA_OK)
|
||||
return rc;
|
||||
return (rc);
|
||||
|
||||
rc = foreach_nfs_host(impl_share, nfs_enable_share_one, linux_opts);
|
||||
|
||||
free(linux_opts);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Used internally by nfs_disable_share to disable sharing for a single host.
|
||||
*/
|
||||
static int
|
||||
@ -471,12 +472,12 @@ nfs_disable_share_one(const char *sharepath, const char *host,
|
||||
free(hostpath);
|
||||
|
||||
if (rc < 0)
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
else
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Disables NFS sharing for the specified share.
|
||||
*/
|
||||
static int
|
||||
@ -487,13 +488,13 @@ nfs_disable_share(sa_share_impl_t impl_share)
|
||||
* The share can't possibly be active, so nothing
|
||||
* needs to be done to disable it.
|
||||
*/
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
return foreach_nfs_host(impl_share, nfs_disable_share_one, NULL);
|
||||
return (foreach_nfs_host(impl_share, nfs_disable_share_one, NULL));
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Checks whether the specified NFS share options are syntactically correct.
|
||||
*/
|
||||
static int
|
||||
@ -505,14 +506,14 @@ nfs_validate_shareopts(const char *shareopts)
|
||||
rc = get_linux_shareopts(shareopts, &linux_opts);
|
||||
|
||||
if (rc != SA_OK)
|
||||
return rc;
|
||||
return (rc);
|
||||
|
||||
free(linux_opts);
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Checks whether a share is currently active.
|
||||
*/
|
||||
static boolean_t
|
||||
@ -523,17 +524,17 @@ nfs_is_share_active(sa_share_impl_t impl_share)
|
||||
FILE *nfs_exportfs_temp_fp;
|
||||
|
||||
if (!nfs_available())
|
||||
return B_FALSE;
|
||||
return (B_FALSE);
|
||||
|
||||
nfs_exportfs_temp_fp = fdopen(dup(nfs_exportfs_temp_fd), "r");
|
||||
|
||||
if (nfs_exportfs_temp_fp == NULL ||
|
||||
fseek(nfs_exportfs_temp_fp, 0, SEEK_SET) < 0) {
|
||||
fclose(nfs_exportfs_temp_fp);
|
||||
return B_FALSE;
|
||||
return (B_FALSE);
|
||||
}
|
||||
|
||||
while (fgets(line, sizeof(line), nfs_exportfs_temp_fp) != NULL) {
|
||||
while (fgets(line, sizeof (line), nfs_exportfs_temp_fp) != NULL) {
|
||||
/*
|
||||
* exportfs uses separate lines for the share path
|
||||
* and the export options when the share path is longer
|
||||
@ -564,16 +565,16 @@ nfs_is_share_active(sa_share_impl_t impl_share)
|
||||
|
||||
if (strcmp(line, impl_share->sharepath) == 0) {
|
||||
fclose(nfs_exportfs_temp_fp);
|
||||
return B_TRUE;
|
||||
return (B_TRUE);
|
||||
}
|
||||
}
|
||||
|
||||
fclose(nfs_exportfs_temp_fp);
|
||||
|
||||
return B_FALSE;
|
||||
return (B_FALSE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called to update a share's options. A share's options might be out of
|
||||
* date if the share was loaded from disk (i.e. /etc/dfs/sharetab) and the
|
||||
* "sharenfs" dataset property has changed in the meantime. This function
|
||||
@ -604,7 +605,7 @@ nfs_update_shareopts(sa_share_impl_t impl_share, const char *resource,
|
||||
shareopts_dup = strdup(shareopts);
|
||||
|
||||
if (shareopts_dup == NULL)
|
||||
return SA_NO_MEMORY;
|
||||
return (SA_NO_MEMORY);
|
||||
|
||||
if (old_shareopts != NULL)
|
||||
free(old_shareopts);
|
||||
@ -614,10 +615,10 @@ nfs_update_shareopts(sa_share_impl_t impl_share, const char *resource,
|
||||
if (needs_reshare)
|
||||
nfs_enable_share(impl_share);
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Clears a share's NFS options. Used by libshare to
|
||||
* clean up shares that are about to be free()'d.
|
||||
*/
|
||||
@ -666,7 +667,7 @@ nfs_check_exportfs(void)
|
||||
nfs_exportfs_temp_fd = mkstemp(nfs_exportfs_tempfile);
|
||||
|
||||
if (nfs_exportfs_temp_fd < 0)
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
|
||||
unlink(nfs_exportfs_tempfile);
|
||||
|
||||
@ -677,26 +678,25 @@ nfs_check_exportfs(void)
|
||||
if (pid < 0) {
|
||||
(void) close(nfs_exportfs_temp_fd);
|
||||
nfs_exportfs_temp_fd = -1;
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
}
|
||||
|
||||
if (pid > 0) {
|
||||
while ((rc = waitpid(pid, &status, 0)) <= 0 && errno == EINTR)
|
||||
; /* empty loop body */
|
||||
while ((rc = waitpid(pid, &status, 0)) <= 0 && errno == EINTR);
|
||||
|
||||
if (rc <= 0) {
|
||||
(void) close(nfs_exportfs_temp_fd);
|
||||
nfs_exportfs_temp_fd = -1;
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
}
|
||||
|
||||
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
|
||||
(void) close(nfs_exportfs_temp_fd);
|
||||
nfs_exportfs_temp_fd = -1;
|
||||
return SA_CONFIG_ERR;
|
||||
return (SA_CONFIG_ERR);
|
||||
}
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/* child */
|
||||
@ -724,10 +724,10 @@ nfs_available(void)
|
||||
if (nfs_exportfs_temp_fd == -1)
|
||||
(void) nfs_check_exportfs();
|
||||
|
||||
return (nfs_exportfs_temp_fd != -1) ? B_TRUE : B_FALSE;
|
||||
return ((nfs_exportfs_temp_fd != -1) ? B_TRUE : B_FALSE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Initializes the NFS functionality of libshare.
|
||||
*/
|
||||
void
|
||||
|
@ -26,7 +26,7 @@
|
||||
*
|
||||
* This is an addition to the zfs device driver to add, modify and remove SMB
|
||||
* shares using the 'net share' command that comes with Samba.
|
||||
|
||||
*
|
||||
* TESTING
|
||||
* Make sure that samba listens to 'localhost' (127.0.0.1) and that the options
|
||||
* 'usershare max shares' and 'usershare owner only' have been rewied/set
|
||||
@ -64,7 +64,7 @@ static boolean_t smb_available(void);
|
||||
|
||||
static sa_fstype_t *smb_fstype;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Retrieve the list of SMB shares.
|
||||
*/
|
||||
static int
|
||||
@ -83,7 +83,7 @@ smb_retrieve_shares(void)
|
||||
/* opendir(), stat() */
|
||||
shares_dir = opendir(SHARE_DIR);
|
||||
if (shares_dir == NULL)
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
|
||||
/* Go through the directory, looking for shares */
|
||||
while ((directory = readdir(shares_dir))) {
|
||||
@ -112,7 +112,7 @@ smb_retrieve_shares(void)
|
||||
goto out;
|
||||
}
|
||||
|
||||
while (fgets(line, sizeof(line), share_file_fp)) {
|
||||
while (fgets(line, sizeof (line), share_file_fp)) {
|
||||
if (line[0] == '#')
|
||||
continue;
|
||||
|
||||
@ -155,15 +155,16 @@ smb_retrieve_shares(void)
|
||||
|
||||
strncpy(shares->name, name,
|
||||
sizeof (shares->name));
|
||||
shares->name [sizeof(shares->name)-1] = '\0';
|
||||
shares->name [sizeof (shares->name) - 1] = '\0';
|
||||
|
||||
strncpy(shares->path, path,
|
||||
sizeof (shares->path));
|
||||
shares->path [sizeof(shares->path)-1] = '\0';
|
||||
shares->path [sizeof (shares->path) - 1] = '\0';
|
||||
|
||||
strncpy(shares->comment, comment,
|
||||
sizeof (shares->comment));
|
||||
shares->comment[sizeof(shares->comment)-1]='\0';
|
||||
shares->comment[sizeof (shares->comment)-1] =
|
||||
'\0';
|
||||
|
||||
shares->guest_ok = atoi(guest_ok);
|
||||
|
||||
@ -190,10 +191,10 @@ out:
|
||||
|
||||
smb_shares = new_shares;
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Used internally by smb_enable_share to enable sharing for a single host.
|
||||
*/
|
||||
static int
|
||||
@ -204,8 +205,8 @@ smb_enable_share_one(const char *sharename, const char *sharepath)
|
||||
int rc;
|
||||
|
||||
/* Support ZFS share name regexp '[[:alnum:]_-.: ]' */
|
||||
strncpy(name, sharename, sizeof(name));
|
||||
name [sizeof(name)-1] = '\0';
|
||||
strncpy(name, sharename, sizeof (name));
|
||||
name [sizeof (name)-1] = '\0';
|
||||
|
||||
pos = name;
|
||||
while (*pos != '\0') {
|
||||
@ -220,32 +221,34 @@ smb_enable_share_one(const char *sharename, const char *sharepath)
|
||||
++pos;
|
||||
}
|
||||
|
||||
/* CMD: net -S NET_CMD_ARG_HOST usershare add Test1 /share/Test1 \
|
||||
* "Comment" "Everyone:F" */
|
||||
snprintf(comment, sizeof(comment), "Comment: %s", sharepath);
|
||||
/*
|
||||
* CMD: net -S NET_CMD_ARG_HOST usershare add Test1 /share/Test1 \
|
||||
* "Comment" "Everyone:F"
|
||||
*/
|
||||
snprintf(comment, sizeof (comment), "Comment: %s", sharepath);
|
||||
|
||||
argv[0] = NET_CMD_PATH;
|
||||
argv[1] = (char*)"-S";
|
||||
argv[1] = (char *)"-S";
|
||||
argv[2] = NET_CMD_ARG_HOST;
|
||||
argv[3] = (char*)"usershare";
|
||||
argv[4] = (char*)"add";
|
||||
argv[5] = (char*)name;
|
||||
argv[6] = (char*)sharepath;
|
||||
argv[7] = (char*)comment;
|
||||
argv[3] = (char *)"usershare";
|
||||
argv[4] = (char *)"add";
|
||||
argv[5] = (char *)name;
|
||||
argv[6] = (char *)sharepath;
|
||||
argv[7] = (char *)comment;
|
||||
argv[8] = "Everyone:F";
|
||||
argv[9] = NULL;
|
||||
|
||||
rc = libzfs_run_process(argv[0], argv, 0);
|
||||
if (rc < 0)
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
|
||||
/* Reload the share file */
|
||||
(void) smb_retrieve_shares();
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Enables SMB sharing for the specified share.
|
||||
*/
|
||||
static int
|
||||
@ -254,20 +257,21 @@ smb_enable_share(sa_share_impl_t impl_share)
|
||||
char *shareopts;
|
||||
|
||||
if (!smb_available())
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
|
||||
shareopts = FSINFO(impl_share, smb_fstype)->shareopts;
|
||||
if (shareopts == NULL) /* on/off */
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
|
||||
if (strcmp(shareopts, "off") == 0)
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
|
||||
/* Magic: Enable (i.e., 'create new') share */
|
||||
return smb_enable_share_one(impl_share->dataset, impl_share->sharepath);
|
||||
return (smb_enable_share_one(impl_share->dataset,
|
||||
impl_share->sharepath));
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Used internally by smb_disable_share to disable sharing for a single host.
|
||||
*/
|
||||
static int
|
||||
@ -278,21 +282,21 @@ smb_disable_share_one(const char *sharename)
|
||||
|
||||
/* CMD: net -S NET_CMD_ARG_HOST usershare delete Test1 */
|
||||
argv[0] = NET_CMD_PATH;
|
||||
argv[1] = (char*)"-S";
|
||||
argv[1] = (char *)"-S";
|
||||
argv[2] = NET_CMD_ARG_HOST;
|
||||
argv[3] = (char*)"usershare";
|
||||
argv[4] = (char*)"delete";
|
||||
argv[3] = (char *)"usershare";
|
||||
argv[4] = (char *)"delete";
|
||||
argv[5] = strdup(sharename);
|
||||
argv[6] = NULL;
|
||||
|
||||
rc = libzfs_run_process(argv[0], argv, 0);
|
||||
if (rc < 0)
|
||||
return SA_SYSTEM_ERR;
|
||||
return (SA_SYSTEM_ERR);
|
||||
else
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Disables SMB sharing for the specified share.
|
||||
*/
|
||||
static int
|
||||
@ -305,20 +309,20 @@ smb_disable_share(sa_share_impl_t impl_share)
|
||||
* The share can't possibly be active, so nothing
|
||||
* needs to be done to disable it.
|
||||
*/
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
while (shares != NULL) {
|
||||
if (strcmp(impl_share->sharepath, shares->path) == 0)
|
||||
return smb_disable_share_one(shares->name);
|
||||
return (smb_disable_share_one(shares->name));
|
||||
|
||||
shares = shares->next;
|
||||
}
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Checks whether the specified SMB share options are syntactically correct.
|
||||
*/
|
||||
static int
|
||||
@ -326,34 +330,34 @@ smb_validate_shareopts(const char *shareopts)
|
||||
{
|
||||
/* TODO: Accept 'name' and sec/acl (?) */
|
||||
if ((strcmp(shareopts, "off") == 0) || (strcmp(shareopts, "on") == 0))
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
|
||||
return SA_SYNTAX_ERR;
|
||||
return (SA_SYNTAX_ERR);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Checks whether a share is currently active.
|
||||
*/
|
||||
static boolean_t
|
||||
smb_is_share_active(sa_share_impl_t impl_share)
|
||||
{
|
||||
if (!smb_available())
|
||||
return B_FALSE;
|
||||
return (B_FALSE);
|
||||
|
||||
/* Retrieve the list of (possible) active shares */
|
||||
smb_retrieve_shares();
|
||||
|
||||
while (smb_shares != NULL) {
|
||||
if (strcmp(impl_share->sharepath, smb_shares->path) == 0)
|
||||
return B_TRUE;
|
||||
return (B_TRUE);
|
||||
|
||||
smb_shares = smb_shares->next;
|
||||
}
|
||||
|
||||
return B_FALSE;
|
||||
return (B_FALSE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called to update a share's options. A share's options might be out of
|
||||
* date if the share was loaded from disk and the "sharesmb" dataset
|
||||
* property has changed in the meantime. This function also takes care
|
||||
@ -367,8 +371,8 @@ smb_update_shareopts(sa_share_impl_t impl_share, const char *resource,
|
||||
boolean_t needs_reshare = B_FALSE;
|
||||
char *old_shareopts;
|
||||
|
||||
if(!impl_share)
|
||||
return SA_SYSTEM_ERR;
|
||||
if (!impl_share)
|
||||
return (SA_SYSTEM_ERR);
|
||||
|
||||
FSINFO(impl_share, smb_fstype)->active =
|
||||
smb_is_share_active(impl_share);
|
||||
@ -384,7 +388,7 @@ smb_update_shareopts(sa_share_impl_t impl_share, const char *resource,
|
||||
shareopts_dup = strdup(shareopts);
|
||||
|
||||
if (shareopts_dup == NULL)
|
||||
return SA_NO_MEMORY;
|
||||
return (SA_NO_MEMORY);
|
||||
|
||||
if (old_shareopts != NULL)
|
||||
free(old_shareopts);
|
||||
@ -394,10 +398,10 @@ smb_update_shareopts(sa_share_impl_t impl_share, const char *resource,
|
||||
if (needs_reshare)
|
||||
smb_enable_share(impl_share);
|
||||
|
||||
return SA_OK;
|
||||
return (SA_OK);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Clears a share's SMB options. Used by libshare to
|
||||
* clean up shares that are about to be free()'d.
|
||||
*/
|
||||
@ -427,15 +431,15 @@ smb_available(void)
|
||||
|
||||
if (lstat(SHARE_DIR, &statbuf) != 0 ||
|
||||
!S_ISDIR(statbuf.st_mode))
|
||||
return B_FALSE;
|
||||
return (B_FALSE);
|
||||
|
||||
if (access(NET_CMD_PATH, F_OK) != 0)
|
||||
return B_FALSE;
|
||||
return (B_FALSE);
|
||||
|
||||
return B_TRUE;
|
||||
return (B_TRUE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Initializes the SMB functionality of libshare.
|
||||
*/
|
||||
void
|
||||
|
@ -95,7 +95,8 @@ ATOMIC_ADD(int, uint_t, int)
|
||||
ATOMIC_ADD(long, ulong_t, long)
|
||||
ATOMIC_ADD(64, uint64_t, int64_t)
|
||||
|
||||
void atomic_add_ptr(volatile void *target, ssize_t bits)
|
||||
void
|
||||
atomic_add_ptr(volatile void *target, ssize_t bits)
|
||||
{
|
||||
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
|
||||
*(caddr_t *)target += bits;
|
||||
@ -120,7 +121,8 @@ ATOMIC_SUB(int, uint_t, int)
|
||||
ATOMIC_SUB(long, ulong_t, long)
|
||||
ATOMIC_SUB(64, uint64_t, int64_t)
|
||||
|
||||
void atomic_sub_ptr(volatile void *target, ssize_t bits)
|
||||
void
|
||||
atomic_sub_ptr(volatile void *target, ssize_t bits)
|
||||
{
|
||||
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
|
||||
*(caddr_t *)target -= bits;
|
||||
@ -175,7 +177,7 @@ ATOMIC_AND(64, uint64_t)
|
||||
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
|
||||
rc = (++(*target)); \
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
|
||||
return rc; \
|
||||
return (rc); \
|
||||
}
|
||||
|
||||
ATOMIC_INC_NV(long, unsigned long)
|
||||
@ -196,7 +198,7 @@ ATOMIC_INC_NV(64, uint64_t)
|
||||
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
|
||||
rc = (--(*target)); \
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
|
||||
return rc; \
|
||||
return (rc); \
|
||||
}
|
||||
|
||||
ATOMIC_DEC_NV(long, unsigned long)
|
||||
@ -217,7 +219,7 @@ ATOMIC_DEC_NV(64, uint64_t)
|
||||
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
|
||||
rc = (*target += bits); \
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
|
||||
return rc; \
|
||||
return (rc); \
|
||||
}
|
||||
|
||||
ATOMIC_ADD_NV(8, uint8_t, int8_t)
|
||||
@ -229,7 +231,8 @@ ATOMIC_ADD_NV(int, uint_t, int)
|
||||
ATOMIC_ADD_NV(long, ulong_t, long)
|
||||
ATOMIC_ADD_NV(64, uint64_t, int64_t)
|
||||
|
||||
void *atomic_add_ptr_nv(volatile void *target, ssize_t bits)
|
||||
void *
|
||||
atomic_add_ptr_nv(volatile void *target, ssize_t bits)
|
||||
{
|
||||
void *ptr;
|
||||
|
||||
@ -237,7 +240,7 @@ void *atomic_add_ptr_nv(volatile void *target, ssize_t bits)
|
||||
ptr = (*(caddr_t *)target += bits);
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
|
||||
|
||||
return ptr;
|
||||
return (ptr);
|
||||
}
|
||||
|
||||
|
||||
@ -248,7 +251,7 @@ void *atomic_add_ptr_nv(volatile void *target, ssize_t bits)
|
||||
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
|
||||
rc = (*target -= bits); \
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
|
||||
return rc; \
|
||||
return (rc); \
|
||||
}
|
||||
|
||||
ATOMIC_SUB_NV(8, uint8_t, int8_t)
|
||||
@ -260,7 +263,8 @@ ATOMIC_SUB_NV(int, uint_t, int)
|
||||
ATOMIC_SUB_NV(long, ulong_t, long)
|
||||
ATOMIC_SUB_NV(64, uint64_t, int64_t)
|
||||
|
||||
void *atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
|
||||
void *
|
||||
atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
|
||||
{
|
||||
void *ptr;
|
||||
|
||||
@ -268,7 +272,7 @@ void *atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
|
||||
ptr = (*(caddr_t *)target -= bits);
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
|
||||
|
||||
return ptr;
|
||||
return (ptr);
|
||||
}
|
||||
|
||||
|
||||
@ -279,7 +283,7 @@ void *atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
|
||||
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
|
||||
rc = (*target |= bits); \
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
|
||||
return rc; \
|
||||
return (rc); \
|
||||
}
|
||||
|
||||
ATOMIC_OR_NV(long, unsigned long)
|
||||
@ -300,7 +304,7 @@ ATOMIC_OR_NV(64, uint64_t)
|
||||
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
|
||||
rc = (*target &= bits); \
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
|
||||
return rc; \
|
||||
return (rc); \
|
||||
}
|
||||
|
||||
ATOMIC_AND_NV(long, unsigned long)
|
||||
@ -327,7 +331,7 @@ ATOMIC_AND_NV(64, uint64_t)
|
||||
if (old == arg1) \
|
||||
*target = arg2; \
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
|
||||
return old; \
|
||||
return (old); \
|
||||
}
|
||||
|
||||
ATOMIC_CAS(8, uint8_t)
|
||||
@ -339,7 +343,8 @@ ATOMIC_CAS(uint, uint_t)
|
||||
ATOMIC_CAS(ulong, ulong_t)
|
||||
ATOMIC_CAS(64, uint64_t)
|
||||
|
||||
void *atomic_cas_ptr(volatile void *target, void *arg1, void *arg2)
|
||||
void *
|
||||
atomic_cas_ptr(volatile void *target, void *arg1, void *arg2)
|
||||
{
|
||||
void *old;
|
||||
|
||||
@ -349,7 +354,7 @@ void *atomic_cas_ptr(volatile void *target, void *arg1, void *arg2)
|
||||
*(void **)target = arg2;
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
|
||||
|
||||
return old;
|
||||
return (old);
|
||||
}
|
||||
|
||||
|
||||
@ -365,7 +370,7 @@ void *atomic_cas_ptr(volatile void *target, void *arg1, void *arg2)
|
||||
old = *target; \
|
||||
*target = bits; \
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
|
||||
return old; \
|
||||
return (old); \
|
||||
}
|
||||
|
||||
ATOMIC_SWAP(8, uint8_t)
|
||||
@ -377,7 +382,8 @@ ATOMIC_SWAP(uint, uint_t)
|
||||
ATOMIC_SWAP(ulong, ulong_t)
|
||||
ATOMIC_SWAP(64, uint64_t)
|
||||
|
||||
void *atomic_swap_ptr(volatile void *target, void *bits)
|
||||
void *
|
||||
atomic_swap_ptr(volatile void *target, void *bits)
|
||||
{
|
||||
void *old;
|
||||
|
||||
@ -386,11 +392,12 @@ void *atomic_swap_ptr(volatile void *target, void *bits)
|
||||
*(void **)target = bits;
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
|
||||
|
||||
return old;
|
||||
return (old);
|
||||
}
|
||||
|
||||
|
||||
int atomic_set_long_excl(volatile ulong_t *target, uint_t value)
|
||||
int
|
||||
atomic_set_long_excl(volatile ulong_t *target, uint_t value)
|
||||
{
|
||||
ulong_t bit;
|
||||
|
||||
@ -398,15 +405,16 @@ int atomic_set_long_excl(volatile ulong_t *target, uint_t value)
|
||||
bit = (1UL << value);
|
||||
if ((*target & bit) != 0) {
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
*target |= bit;
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
int atomic_clear_long_excl(volatile ulong_t *target, uint_t value)
|
||||
int
|
||||
atomic_clear_long_excl(volatile ulong_t *target, uint_t value)
|
||||
{
|
||||
ulong_t bit;
|
||||
|
||||
@ -414,67 +422,78 @@ int atomic_clear_long_excl(volatile ulong_t *target, uint_t value)
|
||||
bit = (1UL << value);
|
||||
if ((*target & bit) != 0) {
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
|
||||
return -1;
|
||||
return (-1);
|
||||
}
|
||||
*target &= ~bit;
|
||||
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
void membar_enter(void)
|
||||
void
|
||||
membar_enter(void)
|
||||
{
|
||||
/* XXX - Implement me */
|
||||
}
|
||||
|
||||
void membar_exit(void)
|
||||
void
|
||||
membar_exit(void)
|
||||
{
|
||||
/* XXX - Implement me */
|
||||
}
|
||||
|
||||
void membar_producer(void)
|
||||
void
|
||||
membar_producer(void)
|
||||
{
|
||||
/* XXX - Implement me */
|
||||
}
|
||||
|
||||
void membar_consumer(void)
|
||||
void
|
||||
membar_consumer(void)
|
||||
{
|
||||
/* XXX - Implement me */
|
||||
}
|
||||
|
||||
/* Legacy kernel interfaces; they will go away (eventually). */
|
||||
|
||||
uint8_t cas8(uint8_t *target, uint8_t arg1, uint8_t arg2)
|
||||
uint8_t
|
||||
cas8(uint8_t *target, uint8_t arg1, uint8_t arg2)
|
||||
{
|
||||
return atomic_cas_8(target, arg1, arg2);
|
||||
return (atomic_cas_8(target, arg1, arg2));
|
||||
}
|
||||
|
||||
uint32_t cas32(uint32_t *target, uint32_t arg1, uint32_t arg2)
|
||||
uint32_t
|
||||
cas32(uint32_t *target, uint32_t arg1, uint32_t arg2)
|
||||
{
|
||||
return atomic_cas_32(target, arg1, arg2);
|
||||
return (atomic_cas_32(target, arg1, arg2));
|
||||
}
|
||||
|
||||
uint64_t cas64(uint64_t *target, uint64_t arg1, uint64_t arg2)
|
||||
uint64_t
|
||||
cas64(uint64_t *target, uint64_t arg1, uint64_t arg2)
|
||||
{
|
||||
return atomic_cas_64(target, arg1, arg2);
|
||||
return (atomic_cas_64(target, arg1, arg2));
|
||||
}
|
||||
|
||||
ulong_t caslong(ulong_t *target, ulong_t arg1, ulong_t arg2)
|
||||
ulong_t
|
||||
caslong(ulong_t *target, ulong_t arg1, ulong_t arg2)
|
||||
{
|
||||
return atomic_cas_ulong(target, arg1, arg2);
|
||||
return (atomic_cas_ulong(target, arg1, arg2));
|
||||
}
|
||||
|
||||
void *casptr(void *target, void *arg1, void *arg2)
|
||||
void *
|
||||
casptr(void *target, void *arg1, void *arg2)
|
||||
{
|
||||
return atomic_cas_ptr(target, arg1, arg2);
|
||||
return (atomic_cas_ptr(target, arg1, arg2));
|
||||
}
|
||||
|
||||
void atomic_and_long(ulong_t *target, ulong_t bits)
|
||||
void
|
||||
atomic_and_long(ulong_t *target, ulong_t bits)
|
||||
{
|
||||
return atomic_and_ulong(target, bits);
|
||||
return (atomic_and_ulong(target, bits));
|
||||
}
|
||||
|
||||
void atomic_or_long(ulong_t *target, ulong_t bits)
|
||||
void
|
||||
atomic_or_long(ulong_t *target, ulong_t bits)
|
||||
{
|
||||
return atomic_or_ulong(target, bits);
|
||||
return (atomic_or_ulong(target, bits));
|
||||
}
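Every atomic.c hunk above follows one emulation pattern: take a single global pthread mutex, perform the plain C operation, release the mutex, and return the now-parenthesized result. A standalone sketch of that pattern for a 64-bit add-and-return-new-value operation (the emul_ name is an illustration, not the libspl symbol):

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Emulated atomic add: serialize every access through one mutex and
 * return the new value, mirroring the ATOMIC_ADD_NV() macro above.
 */
static uint64_t
emul_atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
	uint64_t rc;

	(void) pthread_mutex_lock(&atomic_lock);
	rc = (*target += delta);
	(void) pthread_mutex_unlock(&atomic_lock);

	return (rc);
}

int
main(void)
{
	volatile uint64_t counter = 0;

	(void) printf("%llu\n",
	    (unsigned long long)emul_atomic_add_64_nv(&counter, 42));
	return (0);
}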
|
||||
|
@ -41,7 +41,8 @@ getexecname(void)
pthread_mutex_lock(&mtx);

if (strlen(execname) == 0) {
rc = readlink("/proc/self/exe", execname, sizeof(execname) - 1);
rc = readlink("/proc/self/exe",
execname, sizeof (execname) - 1);
if (rc == -1) {
execname[0] = '\0';
} else {
@ -53,5 +54,5 @@ getexecname(void)
}

pthread_mutex_unlock(&mtx);
return ptr;
return (ptr);
}

@ -41,5 +41,5 @@ gethrtime(void)
abort();
}

return (((u_int64_t)ts.tv_sec) * NANOSEC) + ts.tv_nsec;
return ((((u_int64_t)ts.tv_sec) * NANOSEC) + ts.tv_nsec);
}
@ -41,7 +41,8 @@
|
||||
|
||||
__thread char buf[BUFSIZE];
|
||||
|
||||
#define DIFF(xx) ((mrefp->xx != NULL) && \
|
||||
#define DIFF(xx) ( \
|
||||
(mrefp->xx != NULL) && \
|
||||
(mgetp->xx == NULL || strcmp(mrefp->xx, mgetp->xx) != 0))
|
||||
|
||||
int
|
||||
@ -49,11 +50,12 @@ getmntany(FILE *fp, struct mnttab *mgetp, struct mnttab *mrefp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
while (((ret = _sol_getmntent(fp, mgetp)) == 0) &&
|
||||
(DIFF(mnt_special) || DIFF(mnt_mountp) ||
|
||||
while (
|
||||
((ret = _sol_getmntent(fp, mgetp)) == 0) && (
|
||||
DIFF(mnt_special) || DIFF(mnt_mountp) ||
|
||||
DIFF(mnt_fstype) || DIFF(mnt_mntopts)));
|
||||
|
||||
return ret;
|
||||
return (ret);
|
||||
}
|
||||
|
||||
int
|
||||
@ -69,13 +71,13 @@ _sol_getmntent(FILE *fp, struct mnttab *mgetp)
|
||||
mgetp->mnt_mountp = mntbuf.mnt_dir;
|
||||
mgetp->mnt_fstype = mntbuf.mnt_type;
|
||||
mgetp->mnt_mntopts = mntbuf.mnt_opts;
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
if (feof(fp))
|
||||
return -1;
|
||||
return (-1);
|
||||
|
||||
return MNT_TOOLONG;
|
||||
return (MNT_TOOLONG);
|
||||
}
|
||||
|
||||
int
|
||||
@ -89,11 +91,11 @@ getextmntent(FILE *fp, struct extmnttab *mp, int len)
|
||||
if (stat64(mp->mnt_mountp, &st) != 0) {
|
||||
mp->mnt_major = 0;
|
||||
mp->mnt_minor = 0;
|
||||
return ret;
|
||||
return (ret);
|
||||
}
|
||||
mp->mnt_major = major(st.st_dev);
|
||||
mp->mnt_minor = minor(st.st_dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return (ret);
|
||||
}
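A hypothetical use of the getmntany() wrapper above, looking up the mount entry for the root filesystem by mountpoint; it assumes the libspl <sys/mnttab.h> declarations from this hunk are on the include path:

#include <stdio.h>
#include <string.h>
#include <sys/mnttab.h>

int
main(void)
{
	FILE *fp = fopen("/proc/self/mounts", "r");
	struct mnttab ref, got;
	char root[] = "/";

	if (fp == NULL)
		return (1);

	/* Only the non-NULL fields of the reference entry are compared. */
	(void) memset(&ref, 0, sizeof (ref));
	ref.mnt_mountp = root;

	if (getmntany(fp, &got, &ref) == 0)
		(void) printf("%s is %s (%s)\n", got.mnt_mountp,
		    got.mnt_special, got.mnt_fstype);

	(void) fclose(fp);
	return (0);
}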
|
||||
|
@ -36,13 +36,73 @@ typedef struct devid_nmlist {
|
||||
dev_t dev;
|
||||
} devid_nmlist_t;
|
||||
|
||||
static inline int devid_str_decode(char *devidstr, ddi_devid_t *retdevid, char **retminor_name) { abort(); }
|
||||
static inline int devid_deviceid_to_nmlist(char *search_path, ddi_devid_t devid, char *minor_name, devid_nmlist_t **retlist) { abort(); }
|
||||
static inline void devid_str_free(char *str) { abort(); }
|
||||
static inline void devid_free(ddi_devid_t devid) { abort(); }
|
||||
static inline void devid_free_nmlist(devid_nmlist_t *list) { abort(); }
|
||||
static inline int devid_get(int fd, ddi_devid_t *retdevid) { return -1; }
|
||||
static inline int devid_get_minor_name(int fd, char **retminor_name) { abort(); }
|
||||
static inline char *devid_str_encode(ddi_devid_t devid, char *minor_name) { abort(); }
|
||||
static inline
|
||||
int
|
||||
devid_str_decode(
|
||||
char *devidstr,
|
||||
ddi_devid_t *retdevid,
|
||||
char **retminor_name)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
static inline
|
||||
int
|
||||
devid_deviceid_to_nmlist(
|
||||
char *search_path,
|
||||
ddi_devid_t devid,
|
||||
char *minor_name,
|
||||
devid_nmlist_t **retlist)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
static inline
|
||||
void
|
||||
devid_str_free(char *str)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
static inline
|
||||
void
|
||||
devid_free(ddi_devid_t devid)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
static inline
|
||||
void
|
||||
devid_free_nmlist(devid_nmlist_t *list)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
static inline
|
||||
int
|
||||
devid_get(
|
||||
int fd,
|
||||
ddi_devid_t *retdevid)
|
||||
{
|
||||
return (-1);
|
||||
}
|
||||
|
||||
static inline
|
||||
int
|
||||
devid_get_minor_name(
|
||||
int fd,
|
||||
char **retminor_name)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
static inline
|
||||
char *
|
||||
devid_str_encode(
|
||||
ddi_devid_t devid,
|
||||
char *minor_name)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -33,7 +33,7 @@
|
||||
|
||||
#define FREAD 1
|
||||
#define FWRITE 2
|
||||
//#define FAPPEND 8
|
||||
// #define FAPPEND 8
|
||||
|
||||
#define FCREAT O_CREAT
|
||||
#define FTRUNC O_TRUNC
|
||||
|
@ -19,7 +19,7 @@
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T*/
|
||||
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
|
||||
/* All Rights Reserved */
|
||||
/*
|
||||
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
|
||||
@ -77,7 +77,7 @@ static inline char *_sol_hasmntopt(struct mnttab *mnt, char *opt)
|
||||
|
||||
mnt_new.mnt_opts = mnt->mnt_mntopts;
|
||||
|
||||
return hasmntopt(&mnt_new, opt);
|
||||
return (hasmntopt(&mnt_new, opt));
|
||||
}
|
||||
|
||||
#define hasmntopt _sol_hasmntopt
|
||||
|
@ -71,15 +71,15 @@
|
||||
* kernel back to 2.4.11 so we define them correctly if they are missing.
|
||||
*/
|
||||
#ifdef MNT_FORCE
|
||||
# define MS_FORCE MNT_FORCE
|
||||
#define MS_FORCE MNT_FORCE
|
||||
#else
|
||||
# define MS_FORCE 0x00000001
|
||||
#define MS_FORCE 0x00000001
|
||||
#endif /* MNT_FORCE */
|
||||
|
||||
#ifdef MNT_DETACH
|
||||
# define MS_DETACH MNT_DETACH
|
||||
#define MS_DETACH MNT_DETACH
|
||||
#else
|
||||
# define MS_DETACH 0x00000002
|
||||
#define MS_DETACH 0x00000002
|
||||
#endif /* MNT_DETACH */
|
||||
|
||||
/*
|
||||
|
@ -28,9 +28,9 @@
#define _LIBSPL_SYS_SDT_H

#define DTRACE_PROBE(a) ((void) 0)
#define DTRACE_PROBE1(a,b,c) ((void) 0)
#define DTRACE_PROBE2(a,b,c,d,e) ((void) 0)
#define DTRACE_PROBE3(a,b,c,d,e,f,g) ((void) 0)
#define DTRACE_PROBE4(a,b,c,d,e,f,g,h,i) ((void) 0)
#define DTRACE_PROBE1(a, b, c) ((void) 0)
#define DTRACE_PROBE2(a, b, c, d, e) ((void) 0)
#define DTRACE_PROBE3(a, b, c, d, e, f, g) ((void) 0)
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) ((void) 0)

#endif
@ -39,7 +39,7 @@ stack_getbounds(stack_t *sp)
|
||||
|
||||
rc = pthread_getattr_np(pthread_self(), &attr);
|
||||
if (rc)
|
||||
return rc;
|
||||
return (rc);
|
||||
|
||||
rc = pthread_attr_getstack(&attr, &sp->ss_sp, &sp->ss_size);
|
||||
if (rc == 0)
|
||||
@ -47,7 +47,7 @@ stack_getbounds(stack_t *sp)
|
||||
|
||||
pthread_attr_destroy(&attr);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static inline int
|
||||
@ -57,15 +57,17 @@ thr_stksegment(stack_t *sp)
|
||||
|
||||
rc = stack_getbounds(sp);
|
||||
if (rc)
|
||||
return rc;
|
||||
return (rc);
|
||||
|
||||
/* thr_stksegment() is expected to set sp.ss_sp to the high stack
|
||||
/*
|
||||
* thr_stksegment() is expected to set sp.ss_sp to the high stack
|
||||
* address, but the stack_getbounds() interface is expected to
|
||||
* set sp.ss_sp to the low address. Adjust accordingly. */
|
||||
* set sp.ss_sp to the low address. Adjust accordingly.
|
||||
*/
|
||||
sp->ss_sp = (void *)(((uintptr_t)sp->ss_sp) + sp->ss_size);
|
||||
sp->ss_flags = 0;
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
#endif /* __USE_GNU */
|
||||
|
@ -37,14 +37,14 @@ static inline int
fstat64_blk(int fd, struct stat64 *st)
{
if (fstat64(fd, st) == -1)
return -1;
return (-1);

/* In Linux we need to use an ioctl to get the size of a block device */
if (S_ISBLK(st->st_mode)) {
if (ioctl(fd, BLKGETSIZE64, &st->st_size) != 0)
return -1;
return (-1);
}

return 0;
return (0);
}
#endif /* _LIBSPL_SYS_STAT_H */
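A standalone restatement of the fstat64_blk() pattern above: a plain fstat() does not report a block device's size on Linux, so the size is fetched with the BLKGETSIZE64 ioctl instead. The helper name and the /dev/sda path are only examples:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int
stat_blk_size(int fd, uint64_t *sizep)
{
	struct stat st;

	if (fstat(fd, &st) == -1)
		return (-1);

	*sizep = (uint64_t)st.st_size;

	/* Block devices report their size through an ioctl instead. */
	if (S_ISBLK(st.st_mode) && ioctl(fd, BLKGETSIZE64, sizep) != 0)
		return (-1);

	return (0);
}

int
main(void)
{
	int fd = open("/dev/sda", O_RDONLY);
	uint64_t size;

	if (fd == -1 || stat_blk_size(fd, &size) != 0)
		return (1);

	(void) printf("%llu bytes\n", (unsigned long long)size);
	(void) close(fd);
	return (0);
}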
@ -40,9 +40,9 @@
#define ABS(a) ((a) < 0 ? -(a) : (a))
#endif

#define makedevice(maj,min) makedev(maj,min)
#define makedevice(maj, min) makedev(maj, min)
#define _sysconf(a) sysconf(a)
#define __NORETURN __attribute__ ((noreturn))
#define __NORETURN __attribute__((noreturn))

/*
* Compatibility macros/typedefs needed for Solaris -> Linux port

@ -32,6 +32,6 @@
/* to hold a decimal or hex */
/* hostid string */

#define sysinfo(cmd,buf,cnt) (-1)
#define sysinfo(cmd, buf, cnt) (-1)

#endif
@ -52,7 +52,7 @@ typedef enum uio_rw {
typedef enum uio_seg {
UIO_USERSPACE = 0,
UIO_SYSSPACE = 1,
UIO_USERISPACE= 2,
UIO_USERISPACE = 2,
} uio_seg_t;

typedef struct uio {
@ -27,7 +27,8 @@
|
||||
#ifndef _LIBSPL_UMEM_H
|
||||
#define _LIBSPL_UMEM_H
|
||||
|
||||
/* XXX: We should use the real portable umem library if it is detected
|
||||
/*
|
||||
* XXX: We should use the real portable umem library if it is detected
|
||||
* at configure time. However, if the library is not available, we can
|
||||
* use a trivial malloc based implementation. This obviously impacts
|
||||
* performance, but unless you are using a full userspace build of zpool for
|
||||
@ -87,7 +88,7 @@ umem_alloc(size_t size, int flags)
|
||||
ptr = malloc(size);
|
||||
} while (ptr == NULL && (flags & UMEM_NOFAIL));
|
||||
|
||||
return ptr;
|
||||
return (ptr);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
@ -105,10 +106,10 @@ umem_alloc_aligned(size_t size, size_t align, int flags)
|
||||
__func__, align);
|
||||
if (flags & UMEM_NOFAIL)
|
||||
abort();
|
||||
return NULL;
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
return ptr;
|
||||
return (ptr);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
@ -120,7 +121,7 @@ umem_zalloc(size_t size, int flags)
|
||||
if (ptr)
|
||||
memset(ptr, 0, size);
|
||||
|
||||
return ptr;
|
||||
return (ptr);
|
||||
}
|
||||
|
||||
static inline void
|
||||
@ -133,7 +134,8 @@ static inline void
|
||||
umem_nofail_callback(umem_nofail_callback_t *cb) {}
|
||||
|
||||
static inline umem_cache_t *
|
||||
umem_cache_create(char *name, size_t bufsize, size_t align,
|
||||
umem_cache_create(
|
||||
char *name, size_t bufsize, size_t align,
|
||||
umem_constructor_t *constructor,
|
||||
umem_destructor_t *destructor,
|
||||
umem_reclaim_t *reclaim,
|
||||
@ -141,7 +143,7 @@ umem_cache_create(char *name, size_t bufsize, size_t align,
|
||||
{
|
||||
umem_cache_t *cp;
|
||||
|
||||
cp = umem_alloc(sizeof(umem_cache_t), UMEM_DEFAULT);
|
||||
cp = umem_alloc(sizeof (umem_cache_t), UMEM_DEFAULT);
|
||||
if (cp) {
|
||||
strncpy(cp->cache_name, name, UMEM_CACHE_NAMELEN);
|
||||
cp->cache_bufsize = bufsize;
|
||||
@ -154,13 +156,13 @@ umem_cache_create(char *name, size_t bufsize, size_t align,
|
||||
cp->cache_cflags = cflags;
|
||||
}
|
||||
|
||||
return cp;
|
||||
return (cp);
|
||||
}
|
||||
|
||||
static inline void
|
||||
umem_cache_destroy(umem_cache_t *cp)
|
||||
{
|
||||
umem_free(cp, sizeof(umem_cache_t));
|
||||
umem_free(cp, sizeof (umem_cache_t));
|
||||
}
|
||||
|
||||
static inline void *
|
||||
@ -169,14 +171,15 @@ umem_cache_alloc(umem_cache_t *cp, int flags)
|
||||
void *ptr;
|
||||
|
||||
if (cp->cache_align != 0)
|
||||
ptr = umem_alloc_aligned(cp->cache_bufsize, cp->cache_align, flags);
|
||||
ptr = umem_alloc_aligned(
|
||||
cp->cache_bufsize, cp->cache_align, flags);
|
||||
else
|
||||
ptr = umem_alloc(cp->cache_bufsize, flags);
|
||||
|
||||
if (ptr && cp->cache_constructor)
|
||||
cp->cache_constructor(ptr, cp->cache_private, UMEM_DEFAULT);
|
||||
|
||||
return ptr;
|
||||
return (ptr);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
@ -30,18 +30,18 @@
|
||||
#define _LIBSPL_UNISTD_H
|
||||
|
||||
#if !defined(HAVE_IOCTL_IN_UNISTD_H)
|
||||
# if defined(HAVE_IOCTL_IN_SYS_IOCTL_H)
|
||||
# include <sys/ioctl.h>
|
||||
# elif defined(HAVE_IOCTL_IN_STROPTS_H)
|
||||
# include <stropts.h>
|
||||
# else
|
||||
# error "System call ioctl() unavailable"
|
||||
# endif
|
||||
#endif
|
||||
#if defined(HAVE_IOCTL_IN_SYS_IOCTL_H)
|
||||
#include <sys/ioctl.h>
|
||||
#elif defined(HAVE_IOCTL_IN_STROPTS_H)
|
||||
#include <stropts.h>
|
||||
#else /* HAVE_IOCTL_IN_STROPTS_H */
|
||||
#error "System call ioctl() unavailable"
|
||||
#endif /* HAVE_IOCTL_IN_SYS_IOCTL_H */
|
||||
#endif /* !HAVE_IOCTL_IN_UNISTD_H */
|
||||
|
||||
#if !defined(HAVE_ISSETUGID)
|
||||
# include <sys/types.h>
|
||||
# define issetugid() (geteuid() == 0 || getegid() == 0)
|
||||
#include <sys/types.h>
|
||||
#define issetugid() (geteuid() == 0 || getegid() == 0)
|
||||
#endif
|
||||
|
||||
#endif /* _LIBSPL_UNISTD_H */
|
||||
|
@ -27,34 +27,37 @@
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
|
||||
zoneid_t getzoneid()
|
||||
zoneid_t
|
||||
getzoneid()
|
||||
{
|
||||
return GLOBAL_ZONEID;
|
||||
return (GLOBAL_ZONEID);
|
||||
}
|
||||
|
||||
zoneid_t getzoneidbyname(const char *name)
|
||||
zoneid_t
|
||||
getzoneidbyname(const char *name)
|
||||
{
|
||||
if(name == NULL)
|
||||
return GLOBAL_ZONEID;
|
||||
if (name == NULL)
|
||||
return (GLOBAL_ZONEID);
|
||||
|
||||
if(strcmp(name, GLOBAL_ZONEID_NAME) == 0)
|
||||
return GLOBAL_ZONEID;
|
||||
if (strcmp(name, GLOBAL_ZONEID_NAME) == 0)
|
||||
return (GLOBAL_ZONEID);
|
||||
|
||||
return EINVAL;
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
ssize_t getzonenamebyid(zoneid_t id, char *buf, size_t buflen)
|
||||
ssize_t
|
||||
getzonenamebyid(zoneid_t id, char *buf, size_t buflen)
|
||||
{
|
||||
if(id != GLOBAL_ZONEID)
|
||||
return EINVAL;
|
||||
if (id != GLOBAL_ZONEID)
|
||||
return (EINVAL);
|
||||
|
||||
ssize_t ret = strlen(GLOBAL_ZONEID_NAME) + 1;
|
||||
|
||||
if(buf == NULL || buflen == 0)
|
||||
return ret;
|
||||
if (buf == NULL || buflen == 0)
|
||||
return (ret);
|
||||
|
||||
strncpy(buf, GLOBAL_ZONEID_NAME, buflen);
|
||||
buf[buflen - 1] = '\0';
|
||||
|
||||
return ret;
|
||||
return (ret);
|
||||
}
|
||||

|
@ -461,9 +461,6 @@ libzfs_fru_clear(libzfs_handle_t *hdl, boolean_t final)
void
libzfs_fru_clear(libzfs_handle_t *hdl, boolean_t final)
{
return;
}

#endif /* HAVE_LIBTOPO */

|
@ -168,7 +168,7 @@ fix_paths(nvlist_t *nv, name_entry_t *names)
}

if ((strlen(path) == strlen(ne->ne_name)) &&
!strncmp(path, ne->ne_name, strlen(path))) {
strncmp(path, ne->ne_name, strlen(path)) == 0) {
best = ne;
break;
}
@ -997,7 +997,7 @@ err_blkid3:
err_blkid2:
blkid_put_cache(cache);
err_blkid1:
return err;
return (err);
}
#endif /* HAVE_LIBBLKID */

|
@ -287,22 +287,22 @@ do_mount(const char *src, const char *mntpt, char *opts)
rc = libzfs_run_process(argv[0], argv, STDOUT_VERBOSE|STDERR_VERBOSE);
if (rc) {
if (rc & MOUNT_FILEIO)
return EIO;
return (EIO);
if (rc & MOUNT_USER)
return EINTR;
return (EINTR);
if (rc & MOUNT_SOFTWARE)
return EPIPE;
return (EPIPE);
if (rc & MOUNT_BUSY)
return EBUSY;
return (EBUSY);
if (rc & MOUNT_SYSERR)
return EAGAIN;
return (EAGAIN);
if (rc & MOUNT_USAGE)
return EINVAL;
return (EINVAL);

return ENXIO; /* Generic error */
return (ENXIO); /* Generic error */
}

return 0;
return (0);
}

static int

|
@ -242,7 +242,7 @@ int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
zprop_source_t *srctype)
{
return zpool_get_prop_literal(zhp, prop, buf, len, srctype, B_FALSE);
return (zpool_get_prop_literal(zhp, prop, buf, len, srctype, B_FALSE));
}

/*
@ -250,8 +250,8 @@ zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
* a pre-allocated buffer.
*/
int
zpool_get_prop_literal(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
zprop_source_t *srctype, boolean_t literal)
zpool_get_prop_literal(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
size_t len, zprop_source_t *srctype, boolean_t literal)
{
uint64_t intval;
const char *strval;
@ -1222,8 +1222,9 @@ zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
* part of an active md or lvm device.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device, or one of\n"
"the devices is part of an active md or lvm device"));
"one or more vdevs refer to the same device, or "
"one of\nthe devices is part of an active md or "
"lvm device"));
return (zfs_error(hdl, EZFS_BADDEV, msg));

case EOVERFLOW:
@ -2427,7 +2428,7 @@ zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,

if (path[0] != '/') {
error = zfs_resolve_shortname(path, buf,
sizeof(buf));
sizeof (buf));
if (error != 0)
return (zfs_error(hdl, EZFS_NODEVICE,
msg));
@ -3460,7 +3461,7 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&value) == 0 && value) {
return strip_partition(hdl, path);
return (strip_partition(hdl, path));
}
} else {
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
@ -3880,7 +3881,8 @@ zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,

if (dsobj == 0) {
/* special case for the MOS */
(void) snprintf(pathname, len, "<metadata>:<0x%llx>", (longlong_t)obj);
(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
(longlong_t)obj);
return;
}

@ -3912,7 +3914,8 @@ zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
dsname, zc.zc_value);
}
} else {
(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (longlong_t)obj);
(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
(longlong_t)obj);
}
free(mntpnt);
}
@ -4012,22 +4015,22 @@ zpool_label_disk_check(char *path)
int fd, err;

if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
return errno;
return (errno);

if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
(void) close(fd);
return err;
return (err);
}

if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
efi_free(vtoc);
(void) close(fd);
return EIDRM;
return (EIDRM);
}

efi_free(vtoc);
(void) close(fd);
return 0;
return (0);
}

/*
@ -4167,5 +4170,5 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
}

return 0;
return (0);
}

|
@ -617,8 +617,8 @@ libzfs_module_loaded(const char *module)
const char path_prefix[] = "/sys/module/";
char path[256];

memcpy(path, path_prefix, sizeof(path_prefix) - 1);
strcpy(path + sizeof(path_prefix) - 1, module);
memcpy(path, path_prefix, sizeof (path_prefix) - 1);
strcpy(path + sizeof (path_prefix) - 1, module);

return (access(path, F_OK) == 0);
}
@ -652,12 +652,12 @@ libzfs_run_process(const char *path, char *argv[], int flags)
while ((rc = waitpid(pid, &status, 0)) == -1 &&
errno == EINTR);
if (rc < 0 || !WIFEXITED(status))
return -1;
return (-1);

return WEXITSTATUS(status);
return (WEXITSTATUS(status));
}

return -1;
return (-1);
}

int
@ -666,9 +666,9 @@ libzfs_load_module(const char *module)
char *argv[4] = {"/sbin/modprobe", "-q", (char *)module, (char *)0};

if (libzfs_module_loaded(module))
return 0;
return (0);

return libzfs_run_process("/sbin/modprobe", argv, 0);
return (libzfs_run_process("/sbin/modprobe", argv, 0));
}

libzfs_handle_t *
@ -914,7 +914,7 @@ zfs_strcmp_shortname(char *name, char *cmp_name, int wholedisk)
if (wholedisk)
path_len = zfs_append_partition(path_name, MAXPATHLEN);

if ((path_len == cmp_len) && !strcmp(path_name, cmp_name)) {
if ((path_len == cmp_len) && strcmp(path_name, cmp_name) == 0) {
error = 0;
break;
}
@ -957,7 +957,7 @@ zfs_strcmp_pathname(char *name, char *cmp, int wholedisk)
}

if (name[0] != '/')
return zfs_strcmp_shortname(name, cmp_name, wholedisk);
return (zfs_strcmp_shortname(name, cmp_name, wholedisk));

strncpy(path_name, name, MAXPATHLEN);
path_len = strlen(path_name);
@ -1316,7 +1316,7 @@ str2shift(libzfs_handle_t *hdl, const char *buf)
((toupper(buf[1]) == 'B' && buf[2] == '\0') ||
(toupper(buf[1]) == 'I' && toupper(buf[2]) == 'B' &&
buf[3] == '\0'))))
return (10*i);
return (10 * i);

if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,

|
@ -74,7 +74,7 @@ thread_init(void)
VERIFY3S(pthread_key_create(&kthread_key, NULL), ==, 0);

/* Create entry for primary kthread */
kt = umem_zalloc(sizeof(kthread_t), UMEM_NOFAIL);
kt = umem_zalloc(sizeof (kthread_t), UMEM_NOFAIL);
kt->t_tid = pthread_self();
kt->t_func = NULL;

@ -93,7 +93,7 @@ thread_fini(void)
ASSERT(pthread_equal(kt->t_tid, pthread_self()));
ASSERT3P(kt->t_func, ==, NULL);

umem_free(kt, sizeof(kthread_t));
umem_free(kt, sizeof (kthread_t));

/* Wait for all threads to exit via thread_exit() */
VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
@ -117,7 +117,7 @@ zk_thread_current(void)

ASSERT3P(kt, !=, NULL);

return kt;
return (kt);
}

void *
@ -137,7 +137,7 @@ zk_thread_helper(void *arg)
/* Unreachable, thread must exit with thread_exit() */
abort();

return NULL;
return (NULL);
}

kthread_t *
@ -150,7 +150,7 @@ zk_thread_create(caddr_t stk, size_t stksize, thread_func_t func, void *arg,

ASSERT3S(state & ~TS_RUN, ==, 0);

kt = umem_zalloc(sizeof(kthread_t), UMEM_NOFAIL);
kt = umem_zalloc(sizeof (kthread_t), UMEM_NOFAIL);
kt->t_func = func;
kt->t_arg = arg;

@ -188,7 +188,7 @@ zk_thread_create(caddr_t stk, size_t stksize, thread_func_t func, void *arg,

VERIFY3S(pthread_attr_destroy(&attr), ==, 0);

return kt;
return (kt);
}

void
@ -198,7 +198,7 @@ zk_thread_exit(void)

ASSERT(pthread_equal(kt->t_tid, pthread_self()));

umem_free(kt, sizeof(kthread_t));
umem_free(kt, sizeof (kthread_t));

pthread_mutex_lock(&kthread_lock);
kthread_nr--;

|
@ -157,7 +157,7 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags,
int
taskq_empty_ent(taskq_ent_t *t)
{
return t->tqent_next == NULL;
return (t->tqent_next == NULL);
}

void
@ -287,7 +287,8 @@ taskq_create(const char *name, int nthreads, pri_t pri,
tq->tq_maxalloc = maxalloc;
tq->tq_task.tqent_next = &tq->tq_task;
tq->tq_task.tqent_prev = &tq->tq_task;
tq->tq_threadlist = kmem_alloc(nthreads*sizeof(kthread_t *), KM_SLEEP);
tq->tq_threadlist = kmem_alloc(nthreads * sizeof (kthread_t *),
KM_SLEEP);

if (flags & TASKQ_PREPOPULATE) {
mutex_enter(&tq->tq_lock);

|
@ -1624,7 +1624,7 @@ nvlist_lookup_nvpair_ei_sep(nvlist_t *nvl, const char *name, const char sep,
{
nvpair_t *nvp;
const char *np;
char *sepp=NULL;
char *sepp = NULL;
char *idxp, *idxep;
nvlist_t **nva;
long idx = 0;

|
@ -70,19 +70,16 @@ uiomove(void *p, size_t n, enum uio_rw rw, struct uio *uio)
switch (uio->uio_segflg) {
case UIO_USERSPACE:
case UIO_USERISPACE:
/* p = kernel data pointer
* iov->iov_base = user data pointer */

/*
* p = kernel data pointer
* iov->iov_base = user data pointer
*/
if (rw == UIO_READ) {
if (copy_to_user(iov->iov_base, p, cnt))
return EFAULT;
/* error = xcopyout_nta(p, iov->iov_base, cnt,
* (uio->uio_extflg & UIO_COPY_CACHED)); */
return (EFAULT);
} else {
/* error = xcopyin_nta(iov->iov_base, p, cnt,
* (uio->uio_extflg & UIO_COPY_CACHED)); */
if (copy_from_user(p, iov->iov_base, cnt))
return EFAULT;
return (EFAULT);
}
break;
case UIO_SYSSPACE:
@ -194,21 +191,18 @@ uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes)

case UIO_USERSPACE:
case UIO_USERISPACE:
/* p = kernel data pointer
* iov->iov_base = user data pointer */

/*
* p = kernel data pointer
* iov->iov_base = user data pointer
*/
if (rw == UIO_READ) {
/* * UIO_READ = copy data from kernel to user * */
/* UIO_READ = copy data from kernel to user */
if (copy_to_user(iov->iov_base, p, cnt))
return EFAULT;
/* error = xcopyout_nta(p, iov->iov_base, cnt,
* (uio->uio_extflg & UIO_COPY_CACHED)); */
return (EFAULT);
} else {
/* * UIO_WRITE = copy data from user to kernel * */
/* error = xcopyin_nta(iov->iov_base, p, cnt,
* (uio->uio_extflg & UIO_COPY_CACHED)); */
/* UIO_WRITE = copy data from user to kernel */
if (copy_from_user(p, iov->iov_base, cnt))
return EFAULT;
return (EFAULT);
}
break;

|
@ -906,8 +906,10 @@ buf_fini(void)
int i;

#if defined(_KERNEL) && defined(HAVE_SPL)
/* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel */
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel\
*/
vmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
@ -998,8 +1000,10 @@ buf_init(void)
retry:
buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel */
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
buf_hash_table.ht_table =
vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
@ -1219,7 +1223,7 @@ arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
arc_buf_hdr_t *hdr = ab->b_hdr;
arc_state_t *state = hdr->b_state;

memset(abi, 0, sizeof(arc_buf_info_t));
memset(abi, 0, sizeof (arc_buf_info_t));
abi->abi_flags = hdr->b_flags;
abi->abi_datacnt = hdr->b_datacnt;
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
@ -2031,7 +2035,7 @@ arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes,
int count = 0;

ASSERT(GHOST_STATE(state));
bzero(&marker, sizeof(marker));
bzero(&marker, sizeof (marker));
top:
mutex_enter(&state->arcs_mtx);
for (ab = list_tail(list); ab; ab = ab_prev) {
@ -2412,7 +2416,8 @@ arc_adapt_thread(void)
}

/* reset the growth delay for every reclaim */
arc_grow_time = ddi_get_lbolt()+(zfs_arc_grow_retry * hz);
arc_grow_time = ddi_get_lbolt() +
(zfs_arc_grow_retry * hz);

arc_kmem_reap_now(last_reclaim, 0);
arc_warm = B_TRUE;
@ -3394,7 +3399,7 @@ arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
arc_prune_t *p;

p = kmem_alloc(sizeof(*p), KM_SLEEP);
p = kmem_alloc(sizeof (*p), KM_SLEEP);
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);

|
@ -305,8 +305,10 @@ dbuf_init(void)
retry:
h->hash_table_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel */
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE);
#else
h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
@ -339,8 +341,10 @@ dbuf_fini(void)
for (i = 0; i < DBUF_MUTEXES; i++)
mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel */
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel
*/
vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
@ -1700,8 +1704,7 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
if (dh == NULL) {
err = dbuf_hold_impl(dn, level+1, blkid >> epbs,
fail_sparse, NULL, parentp);
}
else {
} else {
__dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
blkid >> epbs, fail_sparse, NULL,
parentp, dh->dh_depth + 1);
@ -1956,7 +1959,8 @@ top:
dh->dh_fail_sparse, &dh->dh_parent,
&dh->dh_bp, dh);
if (dh->dh_fail_sparse) {
if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
if (dh->dh_err == 0 &&
dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
dh->dh_err = SET_ERROR(ENOENT);
if (dh->dh_err) {
if (dh->dh_parent)
@ -2037,13 +2041,13 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
struct dbuf_hold_impl_data *dh;
int error;

dh = kmem_zalloc(sizeof(struct dbuf_hold_impl_data) *
dh = kmem_zalloc(sizeof (struct dbuf_hold_impl_data) *
DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE);
__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);

error = __dbuf_hold_impl(dh);

kmem_free(dh, sizeof(struct dbuf_hold_impl_data) *
kmem_free(dh, sizeof (struct dbuf_hold_impl_data) *
DBUF_HOLD_IMPL_MAX_DEPTH);

return (error);
@ -2359,7 +2363,8 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
}
}

/* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
/*
* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
* is critical the we not allow the compiler to inline this function in to
* dbuf_sync_list() thereby drastically bloating the stack usage.
*/
@ -2409,7 +2414,8 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
zio_nowait(zio);
}

/* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
/*
* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
* critical the we not allow the compiler to inline this function in to
* dbuf_sync_list() thereby drastically bloating the stack usage.
*/

|
@ -53,10 +53,10 @@ dbuf_stats_hash_table_headers(char *buf, size_t size)
"%-6s %-6s %-8s %-8s %-6s %-6s %-5s %-8s %-8s\n",
"dbuf", "arcbuf", "dnode", "pool", "objset", "object", "level",
"blkid", "offset", "dbsize", "meta", "state", "dbholds", "list",
"atype", "index", "flags", "count", "asize", "access", "mru", "gmru",
"mfu", "gmfu", "l2", "l2_dattr", "l2_asize", "l2_comp", "aholds",
"dtype", "btype", "data_bs", "meta_bs", "bsize",
"lvls", "dholds", "blocks", "dsize");
"atype", "index", "flags", "count", "asize", "access",
"mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize",
"l2_comp", "aholds", "dtype", "btype", "data_bs", "meta_bs",
"bsize", "lvls", "dholds", "blocks", "dsize");
buf[size] = '\0';

return (0);

|
@ -916,20 +916,20 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
return (B_TRUE);

ddt = spa->spa_ddt[BP_GET_CHECKSUM(bp)];
dde = kmem_alloc(sizeof(ddt_entry_t), KM_PUSHPAGE);
dde = kmem_alloc(sizeof (ddt_entry_t), KM_PUSHPAGE);

ddt_key_fill(&(dde->dde_key), bp);

for (type = 0; type < DDT_TYPES; type++) {
for (class = 0; class <= max_class; class++) {
if (ddt_object_lookup(ddt, type, class, dde) == 0) {
kmem_free(dde, sizeof(ddt_entry_t));
kmem_free(dde, sizeof (ddt_entry_t));
return (B_TRUE);
}
}
}

kmem_free(dde, sizeof(ddt_entry_t));
kmem_free(dde, sizeof (ddt_entry_t));
return (B_FALSE);
}

@ -1209,5 +1209,5 @@ ddt_walk(spa_t *spa, ddt_bookmark_t *ddb, ddt_entry_t *dde)

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_dedup_prefetch, int, 0644);
MODULE_PARM_DESC(zfs_dedup_prefetch,"Enable prefetching dedup-ed blks");
MODULE_PARM_DESC(zfs_dedup_prefetch, "Enable prefetching dedup-ed blks");
#endif

|
@ -141,7 +141,7 @@ ddt_zap_walk(objset_t *os, uint64_t object, ddt_entry_t *dde, uint64_t *walk)
static int
ddt_zap_count(objset_t *os, uint64_t object, uint64_t *count)
{
return zap_count(os, object, count);
return (zap_count(os, object, count));
}

const ddt_ops_t ddt_zap_ops = {

|
@ -400,7 +400,8 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
}
nblks = 1;
}
dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_PUSHPAGE | KM_NODEBUG);
dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks,
KM_PUSHPAGE | KM_NODEBUG);

zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
blkid = dbuf_whichblock(dn, offset);
@ -1044,7 +1045,7 @@ dmu_req_copy(void *arg_buf, int size, int *offset, struct request *req)
bv->bv_len -= tocpy;
}

return 0;
return (0);
}

static void
@ -1067,13 +1068,13 @@ dmu_bio_clone(struct bio *bio, struct bio **bio_copy)
struct bio *bio_new;

if (bio == NULL)
return EINVAL;
return (EINVAL);

while (bio) {
bio_new = bio_clone(bio, GFP_NOIO);
if (bio_new == NULL) {
dmu_bio_put(bio_root);
return ENOMEM;
return (ENOMEM);
}

if (bio_last) {
@ -1089,7 +1090,7 @@ dmu_bio_clone(struct bio *bio, struct bio **bio_copy)

*bio_copy = bio_root;

return 0;
return (0);
}

int
@ -1564,7 +1565,7 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
zgd->zgd_db->db_data, zgd->zgd_db->db_size, zp,
dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done, dsa,
ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL | ZIO_FLAG_FASTWRITE, zb));
ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL|ZIO_FLAG_FASTWRITE, zb));

return (0);
}

|
@ -1485,7 +1485,7 @@ dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
return dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value);
return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value));
}

int

|
@ -703,7 +703,8 @@ dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
if (cur_streams >= max_streams) {
return;
}
newstream = kmem_zalloc(sizeof (zstream_t), KM_PUSHPAGE);
newstream =
kmem_zalloc(sizeof (zstream_t), KM_PUSHPAGE);
}

newstream->zst_offset = zst.zst_offset;
@ -743,4 +744,3 @@ MODULE_PARM_DESC(zfetch_block_cap, "Max number of blocks to fetch at a time");
module_param(zfetch_array_rd_sz, ulong, 0644);
MODULE_PARM_DESC(zfetch_array_rd_sz, "Number of bytes in a array_read");
#endif

|
@ -326,10 +326,10 @@ dsl_deleg_get(const char *ddname, nvlist_t **nvp)
dp = startdd->dd_pool;
mos = dp->dp_meta_objset;

zc = kmem_alloc(sizeof(zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof(zap_attribute_t), KM_SLEEP);
basezc = kmem_alloc(sizeof(zap_cursor_t), KM_SLEEP);
baseza = kmem_alloc(sizeof(zap_attribute_t), KM_SLEEP);
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
basezc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
baseza = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
source = kmem_alloc(MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, KM_SLEEP);
VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

@ -371,10 +371,10 @@ dsl_deleg_get(const char *ddname, nvlist_t **nvp)
}

kmem_free(source, MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
kmem_free(baseza, sizeof(zap_attribute_t));
kmem_free(basezc, sizeof(zap_cursor_t));
kmem_free(za, sizeof(zap_attribute_t));
kmem_free(zc, sizeof(zap_cursor_t));
kmem_free(baseza, sizeof (zap_attribute_t));
kmem_free(basezc, sizeof (zap_cursor_t));
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));

dsl_dir_rele(startdd, FTAG);
dsl_pool_rele(dp, FTAG);

|
@ -500,7 +500,8 @@ dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
return (0);

dsda.dsda_snaps = snaps;
VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps, NV_UNIQUE_NAME, KM_PUSHPAGE));
VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps,
NV_UNIQUE_NAME, KM_PUSHPAGE));
dsda.dsda_defer = defer;
dsda.dsda_errlist = errlist;

|
@ -48,8 +48,8 @@ static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
dsl_dir_t *dd = arg;
ASSERTV(dsl_pool_t *dp = dd->dd_pool;)
int t;
ASSERTV(dsl_pool_t *dp = dd->dd_pool);

for (t = 0; t < TXG_SIZE; t++) {
ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
@ -1109,7 +1109,7 @@ dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)

dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
dsl_dataset_rele(ds, FTAG);
}
}

int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,

|
@ -1049,12 +1049,11 @@ dsl_pool_config_held(dsl_pool_t *dp)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* zfs_dirty_data_max_percent only applied at module load time in arc_init(). */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load time in
* arc_init(). */
/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
"zfs_dirty_data_max upper bound as % of RAM");
@ -1065,7 +1064,7 @@ MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");
module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load time in arc_init(). */
/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
"zfs_dirty_data_max upper bound in bytes");

|
@ -201,9 +201,11 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)

if (vdev_resilver_needed(spa->spa_root_vdev,
&scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
spa_event_notify(spa, NULL, FM_EREPORT_ZFS_RESILVER_START);
spa_event_notify(spa, NULL,
FM_EREPORT_ZFS_RESILVER_START);
} else {
spa_event_notify(spa, NULL, FM_EREPORT_ZFS_SCRUB_START);
spa_event_notify(spa, NULL,
FM_EREPORT_ZFS_SCRUB_START);
}

spa->spa_scrub_started = B_TRUE;
@ -783,7 +785,7 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_t *zb,
if (buf)
(void) arc_buf_remove_ref(buf, &buf);
out:
kmem_free(bp_toread, sizeof(blkptr_t));
kmem_free(bp_toread, sizeof (blkptr_t));
}

static void
@ -1290,8 +1292,8 @@ dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
* bookmark so we don't think that we're still trying to resume.
*/
bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_t));
zc = kmem_alloc(sizeof(zap_cursor_t), KM_PUSHPAGE);
za = kmem_alloc(sizeof(zap_attribute_t), KM_PUSHPAGE);
zc = kmem_alloc(sizeof (zap_cursor_t), KM_PUSHPAGE);
za = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);

/* keep pulling things out of the zap-object-as-queue */
while (zap_cursor_init(zc, dp->dp_meta_objset,
@ -1325,8 +1327,8 @@ dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
}
zap_cursor_fini(zc);
out:
kmem_free(za, sizeof(zap_attribute_t));
kmem_free(zc, sizeof(zap_cursor_t));
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));
}

static boolean_t

|
@ -418,15 +418,15 @@ zfs_zevent_alloc(void)
{
zevent_t *ev;

ev = kmem_zalloc(sizeof(zevent_t), KM_PUSHPAGE);
ev = kmem_zalloc(sizeof (zevent_t), KM_PUSHPAGE);
if (ev == NULL)
return NULL;
return (NULL);

list_create(&ev->ev_ze_list, sizeof(zfs_zevent_t),
list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t),
offsetof(zfs_zevent_t, ze_node));
list_link_init(&ev->ev_node);

return ev;
return (ev);
}

static void
@ -436,7 +436,7 @@ zfs_zevent_free(zevent_t *ev)
ev->ev_cb(ev->ev_nvl, ev->ev_detector);

list_destroy(&ev->ev_ze_list);
kmem_free(ev, sizeof(zevent_t));
kmem_free(ev, sizeof (zevent_t));
}

static void
@ -592,8 +592,10 @@ zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
goto out;
}
} else {
/* Existing stream continue with the next element and remove
* ourselves from the wait queue for the previous element */
/*
* Existing stream continue with the next element and remove
* ourselves from the wait queue for the previous element
*/
ev = list_prev(&zevent_list, ze->ze_zevent);
if (ev == NULL) {
error = ENOENT;
@ -619,7 +621,7 @@ zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
out:
mutex_exit(&zevent_lock);

return error;
return (error);
}

int
@ -643,7 +645,7 @@ zfs_zevent_wait(zfs_zevent_t *ze)
out:
mutex_exit(&zevent_lock);

return error;
return (error);
}

void
@ -1512,7 +1514,8 @@ fm_init(void)
}

mutex_init(&zevent_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zevent_list, sizeof(zevent_t), offsetof(zevent_t, ev_node));
list_create(&zevent_list, sizeof (zevent_t),
offsetof(zevent_t, ev_node));
cv_init(&zevent_cv, NULL, CV_DEFAULT, NULL);
}

|
@ -47,7 +47,8 @@ static kmem_cache_t *lz4_cache;

/*ARGSUSED*/
size_t
lz4_compress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
lz4_compress_zfs(void *s_start, void *d_start, size_t s_len,
size_t d_len, int n)
{
uint32_t bufsiz;
char *dest = d_start;
@ -74,7 +75,8 @@ lz4_compress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int n

/*ARGSUSED*/
int
lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len,
size_t d_len, int n)
{
const char *src = s_start;
uint32_t bufsiz = BE_IN32(src);
@ -143,16 +145,16 @@ lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int
* This function explicitly handles the CTX memory structure.
*
* ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
* by the caller (either on the stack or using kmem_cache_alloc). Passing NULL
* isn't valid.
* by the caller (either on the stack or using kmem_cache_alloc). Passing
* NULL isn't valid.
*
* LZ4_compress64kCtx() :
* Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
* isize *Must* be <64KB, otherwise the output will be corrupted.
*
* ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
* by the caller (either on the stack or using kmem_cache_alloc). Passing NULL
* isn't valid.
* by the caller (either on the stack or using kmem_cache_alloc). Passing
* NULL isn't valid.
*/

/*
@ -1009,4 +1011,3 @@ lz4_fini(void)
lz4_cache = NULL;
}
}

|
@ -61,7 +61,8 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
while (src < (uchar_t *)s_start + s_len) {
if ((copymask <<= 1) == (1 << NBBY)) {
if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
kmem_free(lempel, LEMPEL_SIZE*sizeof(uint16_t));
kmem_free(lempel,
LEMPEL_SIZE*sizeof (uint16_t));
return (s_len);
}
copymask = 1;

|
@ -2021,7 +2021,8 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
return (error);
}

void metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
@ -2043,7 +2044,8 @@ void metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
spa_config_exit(spa, SCL_VDEV, FTAG);
}

void metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);

|
@ -251,7 +251,7 @@ sa_cache_fini(void)
void *
sa_spill_alloc(int flags)
{
return kmem_cache_alloc(spill_cache, flags);
return (kmem_cache_alloc(spill_cache, flags));
}

void
@ -607,7 +607,8 @@ sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
}

if (is_var_sz && var_size > 1) {
/* Don't worry that the spill block might overflow.
/*
* Don't worry that the spill block might overflow.
* It will be resized if needed in sa_build_layouts().
*/
if (buftype == SA_SPILL ||
@ -1142,7 +1143,8 @@ sa_tear_down(objset_t *os)
sa_free_attr_table(sa);

cookie = NULL;
while ((layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))){
while ((layout =
avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))) {
sa_idx_tab_t *tab;
while ((tab = list_head(&layout->lot_idx_tab))) {
ASSERT(refcount_count(&tab->sa_refcount));
@ -1151,7 +1153,7 @@ sa_tear_down(objset_t *os)
}

cookie = NULL;
while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie))){
while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie))) {
kmem_free(layout->lot_attrs,
sizeof (sa_attr_type_t) * layout->lot_attr_count);
kmem_free(layout, sizeof (sa_lot_t));

|
@ -288,7 +288,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)

err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_PUSHPAGE);
if (err)
return err;
return (err);

mutex_enter(&spa->spa_props_lock);

@ -488,7 +488,8 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
break;
}

if ((error = dmu_objset_hold(strval,FTAG,&os)))
error = dmu_objset_hold(strval, FTAG, &os);
if (error)
break;

/* Must be ZPL and not gzip compressed. */
@ -2434,9 +2435,9 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
hostid != myhostid) {
nvlist_free(nvconfig);
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%lx). "
"See: http://zfsonlinux.org/msg/ZFS-8000-EY",
"loaded as it was last accessed by another "
"system (host: %s hostid: 0x%lx). See: "
"http://zfsonlinux.org/msg/ZFS-8000-EY",
spa_name(spa), hostname,
(unsigned long)hostid);
return (SET_ERROR(EBADF));
@ -4098,7 +4099,9 @@ spa_tryimport(nvlist_t *tryconfig)
if (dsl_dsobj_to_dsname(spa_name(spa),
spa->spa_bootfs, tmpname) == 0) {
char *cp;
char *dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
char *dsname;

dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);

cp = strchr(tmpname, '/');
if (cp == NULL) {
@ -5865,7 +5868,7 @@ spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
if (sav->sav_count == 0) {
VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
} else {
list = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE);
list = kmem_alloc(sav->sav_count*sizeof (void *), KM_PUSHPAGE);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);

|
@ -1883,7 +1883,7 @@ EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);

module_param(zfs_deadman_synctime_ms, ulong, 0644);
MODULE_PARM_DESC(zfs_deadman_synctime_ms,"Expiration time in milliseconds");
MODULE_PARM_DESC(zfs_deadman_synctime_ms, "Expiration time in milliseconds");

module_param(zfs_deadman_enabled, int, 0644);
MODULE_PARM_DESC(zfs_deadman_enabled, "Enable deadman timer");

|
@ -122,14 +122,14 @@ spa_read_history_update(kstat_t *ksp, int rw)

while ((srh = list_remove_head(&ssh->list))) {
ssh->size--;
kmem_free(srh, sizeof(spa_read_history_t));
kmem_free(srh, sizeof (spa_read_history_t));
}

ASSERT3U(ssh->size, ==, 0);
}

ksp->ks_ndata = ssh->size;
ksp->ks_data_size = ssh->size * sizeof(spa_read_history_t);
ksp->ks_data_size = ssh->size * sizeof (spa_read_history_t);

return (0);
}
@ -181,7 +181,7 @@ spa_read_history_destroy(spa_t *spa)
mutex_enter(&ssh->lock);
while ((srh = list_remove_head(&ssh->list))) {
ssh->size--;
kmem_free(srh, sizeof(spa_read_history_t));
kmem_free(srh, sizeof (spa_read_history_t));
}

ASSERT3U(ssh->size, ==, 0);
@ -206,9 +206,9 @@ spa_read_history_add(spa_t *spa, const zbookmark_t *zb, uint32_t aflags)
if (zfs_read_history_hits == 0 && (aflags & ARC_CACHED))
return;

srh = kmem_zalloc(sizeof(spa_read_history_t), KM_PUSHPAGE);
strlcpy(srh->origin, zb->zb_func, sizeof(srh->origin));
strlcpy(srh->comm, getcomm(), sizeof(srh->comm));
srh = kmem_zalloc(sizeof (spa_read_history_t), KM_PUSHPAGE);
strlcpy(srh->origin, zb->zb_func, sizeof (srh->origin));
strlcpy(srh->comm, getcomm(), sizeof (srh->comm));
srh->start = gethrtime();
srh->objset = zb->zb_objset;
srh->object = zb->zb_object;
@ -226,7 +226,7 @@ spa_read_history_add(spa_t *spa, const zbookmark_t *zb, uint32_t aflags)
while (ssh->size > zfs_read_history) {
ssh->size--;
rm = list_remove_tail(&ssh->list);
kmem_free(rm, sizeof(spa_read_history_t));
kmem_free(rm, sizeof (spa_read_history_t));
}

mutex_exit(&ssh->lock);
@ -343,14 +343,14 @@ spa_txg_history_update(kstat_t *ksp, int rw)

while ((sth = list_remove_head(&ssh->list))) {
ssh->size--;
kmem_free(sth, sizeof(spa_txg_history_t));
kmem_free(sth, sizeof (spa_txg_history_t));
}

ASSERT3U(ssh->size, ==, 0);
}

ksp->ks_ndata = ssh->size;
ksp->ks_data_size = ssh->size * sizeof(spa_txg_history_t);
ksp->ks_data_size = ssh->size * sizeof (spa_txg_history_t);

return (0);
}
@ -402,7 +402,7 @@ spa_txg_history_destroy(spa_t *spa)
mutex_enter(&ssh->lock);
while ((sth = list_remove_head(&ssh->list))) {
ssh->size--;
kmem_free(sth, sizeof(spa_txg_history_t));
kmem_free(sth, sizeof (spa_txg_history_t));
}

ASSERT3U(ssh->size, ==, 0);
@ -424,7 +424,7 @@ spa_txg_history_add(spa_t *spa, uint64_t txg)
if (zfs_txg_history == 0 && ssh->size == 0)
return;

sth = kmem_zalloc(sizeof(spa_txg_history_t), KM_PUSHPAGE);
sth = kmem_zalloc(sizeof (spa_txg_history_t), KM_PUSHPAGE);
sth->txg = txg;
sth->state = TXG_STATE_OPEN;
sth->times[TXG_STATE_BIRTH] = gethrtime();
@ -437,7 +437,7 @@ spa_txg_history_add(spa_t *spa, uint64_t txg)
while (ssh->size > zfs_txg_history) {
ssh->size--;
rm = list_remove_tail(&ssh->list);
kmem_free(rm, sizeof(spa_txg_history_t));
kmem_free(rm, sizeof (spa_txg_history_t));
}

mutex_exit(&ssh->lock);
@ -536,7 +536,7 @@ spa_tx_assign_update(kstat_t *ksp, int rw)
break;

ksp->ks_ndata = i;
ksp->ks_data_size = i * sizeof(kstat_named_t);
ksp->ks_data_size = i * sizeof (kstat_named_t);

return (0);
}
@ -553,7 +553,7 @@ spa_tx_assign_init(spa_t *spa)
mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);

ssh->count = 42; /* power of two buckets for 1ns to 2,199s */
ssh->size = ssh->count * sizeof(kstat_named_t);
ssh->size = ssh->count * sizeof (kstat_named_t);
ssh->private = kmem_alloc(ssh->size, KM_SLEEP);

(void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));

|
@ -493,8 +493,8 @@ txg_sync_thread(dsl_pool_t *dp)

txg_thread_enter(tx, &cpr);

vs1 = kmem_alloc(sizeof(vdev_stat_t), KM_PUSHPAGE);
vs2 = kmem_alloc(sizeof(vdev_stat_t), KM_PUSHPAGE);
vs1 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE);
vs2 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE);

start = delta = 0;
for (;;) {
@ -533,8 +533,8 @@ txg_sync_thread(dsl_pool_t *dp)
}

if (tx->tx_exiting) {
kmem_free(vs2, sizeof(vdev_stat_t));
kmem_free(vs1, sizeof(vdev_stat_t));
kmem_free(vs2, sizeof (vdev_stat_t));
kmem_free(vs1, sizeof (vdev_stat_t));
txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);
}

|
@ -256,8 +256,8 @@ vdev_cache_read(zio_t *zio)
vdev_cache_t *vc = &zio->io_vd->vdev_cache;
vdev_cache_entry_t *ve, *ve_search;
uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);)
zio_t *fio;
ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS));

ASSERT(zio->io_type == ZIO_TYPE_READ);

@ -277,10 +277,10 @@ vdev_cache_read(zio_t *zio)

mutex_enter(&vc->vc_lock);

ve_search = kmem_alloc(sizeof(vdev_cache_entry_t), KM_PUSHPAGE);
ve_search = kmem_alloc(sizeof (vdev_cache_entry_t), KM_PUSHPAGE);
ve_search->ve_offset = cache_offset;
ve = avl_find(&vc->vc_offset_tree, ve_search, NULL);
kmem_free(ve_search, sizeof(vdev_cache_entry_t));
kmem_free(ve_search, sizeof (vdev_cache_entry_t));

if (ve != NULL) {
if (ve->ve_missed_update) {

|
@ -65,7 +65,7 @@ vdev_bdev_mode(int smode)
if (smode & FWRITE)
mode |= FMODE_WRITE;

return mode;
return (mode);
}
#else
static int
@ -78,7 +78,7 @@ vdev_bdev_mode(int smode)
if ((smode & FREAD) && !(smode & FWRITE))
mode = MS_RDONLY;

return mode;
return (mode);
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */

@ -139,18 +139,19 @@ vdev_elevator_switch(vdev_t *v, char *elevator)
return (0);

/* Leave existing scheduler when set to "none" */
if (!strncmp(elevator, "none", 4) && (strlen(elevator) == 4))
if (strncmp(elevator, "none", 4) && (strlen(elevator) == 4) == 0)
return (0);

#ifdef HAVE_ELEVATOR_CHANGE
error = elevator_change(q, elevator);
#else
/* For pre-2.6.36 kernels elevator_change() is not available.
/*
* For pre-2.6.36 kernels elevator_change() is not available.
* Therefore we fall back to using a usermodehelper to echo the
* elevator into sysfs; This requires /bin/echo and sysfs to be
* mounted which may not be true early in the boot process.
*/
# define SET_SCHEDULER_CMD \
#define SET_SCHEDULER_CMD \
"exec 0</dev/null " \
" 1>/sys/block/%s/queue/scheduler " \
" 2>/dev/null; " \
@ -207,7 +208,7 @@ vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd)

bdev = vdev_bdev_open(path, vdev_bdev_mode(mode), zfs_vdev_holder);
if (IS_ERR(bdev))
return bdev;
return (bdev);

disk = get_gendisk(bdev->bd_dev, &partno);
vdev_bdev_close(bdev, vdev_bdev_mode(mode));
@ -231,9 +232,9 @@ vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd)
put_disk(disk);
}

return result;
return (result);
#else
return ERR_PTR(-EOPNOTSUPP);
return (ERR_PTR(-EOPNOTSUPP));
#endif /* defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK) */
}

@ -248,7 +249,7 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
/* Must have a pathname and it must be absolute. */
if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return EINVAL;
return (EINVAL);
}

/*
@ -261,9 +262,9 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
goto skip_open;
}

vd = kmem_zalloc(sizeof(vdev_disk_t), KM_PUSHPAGE);
vd = kmem_zalloc(sizeof (vdev_disk_t), KM_PUSHPAGE);
if (vd == NULL)
return ENOMEM;
return (ENOMEM);

/*
* Devices are always opened by the path provided at configuration
@ -286,8 +287,8 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
bdev = vdev_bdev_open(v->vdev_path,
vdev_bdev_mode(mode), zfs_vdev_holder);
if (IS_ERR(bdev)) {
kmem_free(vd, sizeof(vdev_disk_t));
return -PTR_ERR(bdev);
kmem_free(vd, sizeof (vdev_disk_t));
return (-PTR_ERR(bdev));
}

v->vdev_tsd = vd;
@ -312,7 +313,7 @@ skip_open:
/* Try to set the io scheduler elevator algorithm */
(void) vdev_elevator_switch(v, zfs_vdev_scheduler);

return 0;
return (0);
}

static void
@ -327,7 +328,7 @@ vdev_disk_close(vdev_t *v)
vdev_bdev_close(vd->vd_bdev,
vdev_bdev_mode(spa_mode(v->vdev_spa)));

kmem_free(vd, sizeof(vdev_disk_t));
kmem_free(vd, sizeof (vdev_disk_t));
v->vdev_tsd = NULL;
}

@ -337,8 +338,8 @@ vdev_disk_dio_alloc(int bio_count)
dio_request_t *dr;
int i;

dr = kmem_zalloc(sizeof(dio_request_t) +
sizeof(struct bio *) * bio_count, KM_PUSHPAGE);
dr = kmem_zalloc(sizeof (dio_request_t) +
sizeof (struct bio *) * bio_count, KM_PUSHPAGE);
if (dr) {
init_completion(&dr->dr_comp);
atomic_set(&dr->dr_ref, 0);
@ -349,7 +350,7 @@ vdev_disk_dio_alloc(int bio_count)
dr->dr_bio[i] = NULL;
}

return dr;
return (dr);
}

static void
@ -361,8 +362,8 @@ vdev_disk_dio_free(dio_request_t *dr)
if (dr->dr_bio[i])
bio_put(dr->dr_bio[i]);

kmem_free(dr, sizeof(dio_request_t) +
sizeof(struct bio *) * dr->dr_bio_count);
kmem_free(dr, sizeof (dio_request_t) +
sizeof (struct bio *) * dr->dr_bio_count);
}

static int
@ -372,17 +373,17 @@ vdev_disk_dio_is_sync(dio_request_t *dr)
/* BIO_RW_SYNC preferred interface from 2.6.12-2.6.29 */
return (dr->dr_rw & (1 << BIO_RW_SYNC));
#else
# ifdef HAVE_BIO_RW_SYNCIO
#ifdef HAVE_BIO_RW_SYNCIO
/* BIO_RW_SYNCIO preferred interface from 2.6.30-2.6.35 */
return (dr->dr_rw & (1 << BIO_RW_SYNCIO));
# else
# ifdef HAVE_REQ_SYNC
#else
#ifdef HAVE_REQ_SYNC
/* REQ_SYNC preferred interface from 2.6.36-2.6.xx */
return (dr->dr_rw & REQ_SYNC);
# else
# error "Unable to determine bio sync flag"
# endif /* HAVE_REQ_SYNC */
# endif /* HAVE_BIO_RW_SYNC */
#else
#error "Unable to determine bio sync flag"
#endif /* HAVE_REQ_SYNC */
#endif /* HAVE_BIO_RW_SYNC */
#endif /* HAVE_BIO_RW_SYNCIO */
}

@ -417,7 +418,7 @@ vdev_disk_dio_put(dio_request_t *dr)
}
}

return rc;
return (rc);
}

BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
@ -436,11 +437,11 @@ BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)

#ifndef HAVE_2ARGS_BIO_END_IO_T
if (bio->bi_size)
return 1;
return (1);
#endif /* HAVE_2ARGS_BIO_END_IO_T */

if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
error = (-EIO);

if (dr->dr_error == 0)
dr->dr_error = -error;
@ -491,7 +492,7 @@ bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
offset = 0;
}

return bio_size;
return (bio_size);
}

static int
@ -509,7 +510,7 @@ __vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
retry:
dr = vdev_disk_dio_alloc(bio_count);
if (dr == NULL)
return ENOMEM;
return (ENOMEM);

if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
bio_set_flags_failfast(bdev, &flags);
@ -548,7 +549,7 @@ retry:
bio_nr_pages(bio_ptr, bio_size));
if (dr->dr_bio[i] == NULL) {
vdev_disk_dio_free(dr);
return ENOMEM;
return (ENOMEM);
}

/* Matching put called by vdev_disk_physio_completion */
@ -592,9 +593,9 @@ retry:
ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
}

(void)vdev_disk_dio_put(dr);
(void) vdev_disk_dio_put(dr);

return error;
return (error);
}

int
@ -602,7 +603,7 @@ vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
size_t size, uint64_t offset, int flags)
{
bio_set_flags_failfast(bdev, &flags);
return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
return (__vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags));
}

BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
@ -631,11 +632,11 @@ vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)

q = bdev_get_queue(bdev);
if (!q)
return ENXIO;
return (ENXIO);

bio = bio_alloc(GFP_NOIO, 0);
if (!bio)
return ENOMEM;
return (ENOMEM);

bio->bi_end_io = vdev_disk_io_flush_completion;
bio->bi_private = zio;
@ -643,7 +644,7 @@ vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
zio->io_delay = jiffies_64;
submit_bio(VDEV_WRITE_FLUSH_FUA, bio);

return 0;
return (0);
}

static int
@ -658,7 +659,7 @@ vdev_disk_io_start(zio_t *zio)

if (!vdev_readable(v)) {
zio->io_error = SET_ERROR(ENXIO);
return ZIO_PIPELINE_CONTINUE;
return (ZIO_PIPELINE_CONTINUE);
}

switch (zio->io_cmd) {
@ -674,7 +675,7 @@ vdev_disk_io_start(zio_t *zio)

error = vdev_disk_io_flush(vd->vd_bdev, zio);
if (error == 0)
return ZIO_PIPELINE_STOP;
return (ZIO_PIPELINE_STOP);

zio->io_error = error;
if (error == ENOTSUP)
@ -686,7 +687,7 @@ vdev_disk_io_start(zio_t *zio)
zio->io_error = SET_ERROR(ENOTSUP);
}

return ZIO_PIPELINE_CONTINUE;
return (ZIO_PIPELINE_CONTINUE);

case ZIO_TYPE_WRITE:
flags = WRITE;
@ -698,17 +699,17 @@ vdev_disk_io_start(zio_t *zio)

default:
zio->io_error = SET_ERROR(ENOTSUP);
return ZIO_PIPELINE_CONTINUE;
return (ZIO_PIPELINE_CONTINUE);
}

error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
zio->io_size, zio->io_offset, flags);
if (error) {
zio->io_error = error;
return ZIO_PIPELINE_CONTINUE;
return (ZIO_PIPELINE_CONTINUE);
}

return ZIO_PIPELINE_STOP;
return (ZIO_PIPELINE_STOP);
}

static void
@ -787,16 +788,16 @@ vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)

bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), zfs_vdev_holder);
if (IS_ERR(bdev))
return -PTR_ERR(bdev);
return (-PTR_ERR(bdev));

s = bdev_capacity(bdev);
if (s == 0) {
vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
return EIO;
return (EIO);
}

size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
label = vmem_alloc(sizeof(vdev_label_t), KM_PUSHPAGE);
size = P2ALIGN_TYPED(s, sizeof (vdev_label_t), uint64_t);
label = vmem_alloc(sizeof (vdev_label_t), KM_PUSHPAGE);

for (i = 0; i < VDEV_LABELS; i++) {
uint64_t offset, state, txg = 0;
@ -830,10 +831,10 @@ vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
break;
}

vmem_free(label, sizeof(vdev_label_t));
vmem_free(label, sizeof (vdev_label_t));
vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

return 0;
return (0);
}

module_param(zfs_vdev_scheduler, charp, 0644);
|
@ -1116,7 +1116,7 @@ vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags)
buf = vp->vp_nvlist;
buflen = sizeof (vp->vp_nvlist);

if (nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_PUSHPAGE) == 0) {
if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_PUSHPAGE)) {
for (; l < VDEV_LABELS; l += 2) {
vdev_label_write(zio, vd, l, vp,
offsetof(vdev_label_t, vl_vdev_phys),

@ -500,8 +500,10 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
return (NULL);

/* Prevent users from setting the zfs_vdev_aggregation_limit
* tuning larger than SPA_MAXBLOCKSIZE. */
/*
* Prevent users from setting the zfs_vdev_aggregation_limit
* tuning larger than SPA_MAXBLOCKSIZE.
*/
zfs_vdev_aggregation_limit =
MIN(zfs_vdev_aggregation_limit, SPA_MAXBLOCKSIZE);

@ -676,11 +678,11 @@ again:
* For FIFO queues (sync), issue the i/o with the lowest timestamp.
*/
vqc = &vq->vq_class[p];
search = zio_buf_alloc(sizeof(*search));
search = zio_buf_alloc(sizeof (*search));
search->io_timestamp = 0;
search->io_offset = vq->vq_last_offset + 1;
VERIFY3P(avl_find(&vqc->vqc_queued_tree, search, &idx), ==, NULL);
zio_buf_free(search, sizeof(*search));
zio_buf_free(search, sizeof (*search));
zio = avl_nearest(&vqc->vqc_queued_tree, idx, AVL_AFTER);
if (zio == NULL)
zio = avl_first(&vqc->vqc_queued_tree);

@ -938,7 +938,8 @@ mzap_addent(zap_name_t *zn, uint64_t value)

#ifdef ZFS_DEBUG
for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
ASSERTV(mzap_ent_phys_t *mze=&zap->zap_m.zap_phys->mz_chunk[i]);
ASSERTV(mzap_ent_phys_t *mze);
ASSERT(mze = &zap->zap_m.zap_phys->mz_chunk[i]);
ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0);
}
#endif

@ -180,8 +180,8 @@ feature_is_supported(objset_t *os, uint64_t obj, uint64_t desc_obj,
zap_attribute_t *za;
char *buf;

zc = kmem_alloc(sizeof(zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof(zap_attribute_t), KM_SLEEP);
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);

supported = B_TRUE;
@ -215,8 +215,8 @@ feature_is_supported(objset_t *os, uint64_t obj, uint64_t desc_obj,
zap_cursor_fini(zc);

kmem_free(buf, MAXPATHLEN);
kmem_free(za, sizeof(zap_attribute_t));
kmem_free(zc, sizeof(zap_cursor_t));
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));

return (supported);
}

@ -1157,7 +1157,7 @@ zfs_acl_chown_setattr(znode_t *zp)
zfs_acl_t *aclp;

if (ZTOZSB(zp)->z_acl_type == ZFS_ACLTYPE_POSIXACL)
return 0;
return (0);

ASSERT(MUTEX_HELD(&zp->z_lock));
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
@ -1165,6 +1165,7 @@ zfs_acl_chown_setattr(znode_t *zp)
if ((error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE)) == 0)
zp->z_mode = zfs_mode_compute(zp->z_mode, aclp,
&zp->z_pflags, zp->z_uid, zp->z_gid);

return (error);
}

@ -100,7 +100,7 @@ static taskq_t *zfs_expire_taskq;
static zfs_snapentry_t *
zfsctl_sep_alloc(void)
{
return kmem_zalloc(sizeof (zfs_snapentry_t), KM_SLEEP);
return (kmem_zalloc(sizeof (zfs_snapentry_t), KM_SLEEP));
}

void
@ -255,7 +255,6 @@ zfsctl_inode_lookup(zfs_sb_t *zsb, uint64_t id,
void
zfsctl_inode_destroy(struct inode *ip)
{
return;
}

/*

@ -97,7 +97,6 @@ zfs_dbgmsg_fini(void)
mutex_destroy(&zfs_dbgmsgs_lock);
ASSERT0(zfs_dbgmsg_size);
#endif
return;
}

#if !defined(_KERNEL) || !defined(__linux__)

@ -567,7 +567,7 @@ out_check:
return (PRIV_POLICY(cr, needed_priv, B_FALSE, EPERM, NULL));
return (0);
#else
return ENOTSUP;
return (ENOTSUP);
#endif /* HAVE_MLSLABEL */
}

@ -4914,7 +4914,7 @@ zfs_ioc_events_clear(zfs_cmd_t *zc)
zfs_zevent_drain_all(&count);
zc->zc_cookie = count;

return 0;
return (0);
}

/*
@ -5427,14 +5427,17 @@ zfsdev_get_state_impl(minor_t minor, enum zfsdev_state_type which)
zs = list_next(&zfsdev_state_list, zs)) {
if (zs->zs_minor == minor) {
switch (which) {
case ZST_ONEXIT: return (zs->zs_onexit);
case ZST_ZEVENT: return (zs->zs_zevent);
case ZST_ALL: return (zs);
case ZST_ONEXIT:
return (zs->zs_onexit);
case ZST_ZEVENT:
return (zs->zs_zevent);
case ZST_ALL:
return (zs);
}
}
}

return NULL;
return (NULL);
}

void *
@ -5446,7 +5449,7 @@ zfsdev_get_state(minor_t minor, enum zfsdev_state_type which)
ptr = zfsdev_get_state_impl(minor, which);
mutex_exit(&zfsdev_state_lock);

return ptr;
return (ptr);
}

minor_t
@ -5494,7 +5497,7 @@ zfsdev_state_init(struct file *filp)
if (minor == 0)
return (SET_ERROR(ENXIO));

zs = kmem_zalloc( sizeof(zfsdev_state_t), KM_SLEEP);
zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);

zs->zs_file = filp;
zs->zs_minor = minor;
@ -5521,9 +5524,9 @@ zfsdev_state_destroy(struct file *filp)
zfs_zevent_destroy(zs->zs_zevent);

list_remove(&zfsdev_state_list, zs);
kmem_free(zs, sizeof(zfsdev_state_t));
kmem_free(zs, sizeof (zfsdev_state_t));

return 0;
return (0);
}

static int
@ -5623,7 +5626,7 @@ zfsdev_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
goto out;

/* legacy ioctls can modify zc_name */
(void) strlcpy(saved_poolname, zc->zc_name, sizeof(saved_poolname));
(void) strlcpy(saved_poolname, zc->zc_name, sizeof (saved_poolname));
len = strcspn(saved_poolname, "/@") + 1;
saved_poolname[len] = '\0';

@ -5702,7 +5705,7 @@ out:
static long
zfsdev_compat_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
{
return zfsdev_ioctl(filp, cmd, arg);
return (zfsdev_ioctl(filp, cmd, arg));
}
#else
#define zfsdev_compat_ioctl NULL

@ -550,7 +550,7 @@ zfs_range_unlock(rl_t *rl)
ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
ASSERT(!rl->r_proxy);
list_create(&free_list, sizeof(rl_t), offsetof(rl_t, rl_node));
list_create(&free_list, sizeof (rl_t), offsetof(rl_t, rl_node));

mutex_enter(&zp->z_range_lock);
if (rl->r_type == RL_WRITER) {

@ -310,7 +310,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
}

/* First do a bulk query of the attributes that aren't cached */
bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 20, KM_SLEEP);
bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zsb), NULL, &crtime, 16);
@ -324,7 +324,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
&znode_acl, 88);

if (sa_bulk_lookup_locked(hdl, bulk, count) != 0) {
kmem_free(bulk, sizeof(sa_bulk_attr_t) * 20);
kmem_free(bulk, sizeof (sa_bulk_attr_t) * 20);
goto done;
}

@ -333,7 +333,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
* it is such a way to pick up an already existing layout number
*/
count = 0;
sa_attrs = kmem_zalloc(sizeof(sa_bulk_attr_t) * 20, KM_SLEEP);
sa_attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zsb), NULL,
&zp->z_size, 8);
@ -390,8 +390,8 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
znode_acl.z_acl_extern_obj, tx));

zp->z_is_sa = B_TRUE;
kmem_free(sa_attrs, sizeof(sa_bulk_attr_t) * 20);
kmem_free(bulk, sizeof(sa_bulk_attr_t) * 20);
kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * 20);
kmem_free(bulk, sizeof (sa_bulk_attr_t) * 20);
done:
if (drop_lock)
mutex_exit(&zp->z_lock);

@ -1249,10 +1249,12 @@ zfs_domount(struct super_block *sb, void *data, int silent)

atime_changed_cb(zsb, B_FALSE);
readonly_changed_cb(zsb, B_TRUE);
if ((error = dsl_prop_get_integer(osname,"xattr",&pval,NULL)))
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zsb, pval);
if ((error = dsl_prop_get_integer(osname,"acltype",&pval,NULL)))
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acltype_changed_cb(zsb, pval);
zsb->z_issnap = B_TRUE;

@ -2500,11 +2500,11 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
*/
xoap = xva_getxoptattr(xvap);

tmpxvattr = kmem_alloc(sizeof(xvattr_t), KM_SLEEP);
tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
xva_init(tmpxvattr);

bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP);
xattr_bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP);
bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);

/*
* Immutable files can only alter immutable bit and atime
@ -2528,8 +2528,10 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
* once large timestamps are fully supported.
*/
if (mask & (ATTR_ATIME | ATTR_MTIME)) {
if (((mask & ATTR_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & ATTR_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
if (((mask & ATTR_ATIME) &&
TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & ATTR_MTIME) &&
TIMESPEC_OVERFLOW(&vap->va_mtime))) {
err = EOVERFLOW;
goto out3;
}
@ -3040,9 +3042,9 @@ out2:
zil_commit(zilog, 0);

out3:
kmem_free(xattr_bulk, sizeof(sa_bulk_attr_t) * 7);
kmem_free(bulk, sizeof(sa_bulk_attr_t) * 7);
kmem_free(tmpxvattr, sizeof(xvattr_t));
kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7);
kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7);
kmem_free(tmpxvattr, sizeof (xvattr_t));
ZFS_EXIT(zsb);
return (err);
}

@ -440,7 +440,7 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
error:
unlock_new_inode(ip);
iput(ip);
return NULL;
return (NULL);
}

/*
@ -647,7 +647,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
* order for DMU_OT_ZNODE is critical since it needs to be constructed
* in the old znode_phys_t format. Don't change this ordering
*/
sa_attrs = kmem_alloc(sizeof(sa_bulk_attr_t) * ZPL_END, KM_PUSHPAGE);
sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_PUSHPAGE);

if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
@ -749,7 +749,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
err = zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx);
ASSERT0(err);
}
kmem_free(sa_attrs, sizeof(sa_bulk_attr_t) * ZPL_END);
kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
ZFS_OBJ_HOLD_EXIT(zsb, obj);
}

@ -319,7 +319,7 @@ zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
char *lrbuf, *lrp;
int error = 0;

bzero(&next_blk, sizeof(blkptr_t));
bzero(&next_blk, sizeof (blkptr_t));

/*
* Old logs didn't record the maximum zh_claim_lr_seq.
@ -1017,13 +1017,10 @@ zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
use_slog = USE_SLOG(zilog);
error = zio_alloc_zil(spa, txg, bp, zil_blksz,
USE_SLOG(zilog));
if (use_slog)
{
if (use_slog) {
ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
}
else
{
} else {
ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
}
@ -1134,12 +1131,14 @@ zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
dbuf = lr_buf + reclen;
lrw->lr_common.lrc_reclen += dlen;
ZIL_STAT_BUMP(zil_itx_needcopy_count);
ZIL_STAT_INCR(zil_itx_needcopy_bytes, lrw->lr_length);
ZIL_STAT_INCR(zil_itx_needcopy_bytes,
lrw->lr_length);
} else {
ASSERT(itx->itx_wr_state == WR_INDIRECT);
dbuf = NULL;
ZIL_STAT_BUMP(zil_itx_indirect_count);
ZIL_STAT_INCR(zil_itx_indirect_bytes, lrw->lr_length);
ZIL_STAT_INCR(zil_itx_indirect_bytes,
lrw->lr_length);
}
error = zilog->zl_get_data(
itx->itx_private, lrw, dbuf, lwb->lwb_zio);
@ -1344,7 +1343,8 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
}
ASSERT(itxg->itxg_sod == 0);
itxg->itxg_txg = txg;
itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_PUSHPAGE);
itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
KM_PUSHPAGE);

list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
@ -1364,7 +1364,8 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)

ian = avl_find(t, &foid, &where);
if (ian == NULL) {
ian = kmem_alloc(sizeof (itx_async_node_t), KM_PUSHPAGE);
ian = kmem_alloc(sizeof (itx_async_node_t),
KM_PUSHPAGE);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
@ -1744,7 +1745,7 @@ zil_init(void)
sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);

zil_ksp = kstat_create("zfs", 0, "zil", "misc",
KSTAT_TYPE_NAMED, sizeof(zil_stats) / sizeof(kstat_named_t),
KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);

if (zil_ksp != NULL) {

@ -132,7 +132,7 @@ zio_init(void)
zio_cons, zio_dest, NULL, NULL, NULL, KMC_KMEM);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM);
zio_vdev_cache = kmem_cache_create("zio_vdev_cache", sizeof(vdev_io_t),
zio_vdev_cache = kmem_cache_create("zio_vdev_cache", sizeof (vdev_io_t),
PAGESIZE, NULL, NULL, NULL, NULL, NULL, KMC_VMEM);

/*
@ -1852,11 +1852,11 @@ static void
zio_write_gang_member_ready(zio_t *zio)
{
zio_t *pio = zio_unique_parent(zio);
ASSERTV(zio_t *gio = zio->io_gang_leader;)
dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize;
int d;
ASSERTV(zio_t *gio = zio->io_gang_leader);

if (BP_IS_HOLE(zio->io_bp))
return;
@ -2995,15 +2995,18 @@ zio_done(zio_t *zio)
if (zio->io_bp != NULL) {
ASSERT(zio->io_bp->blk_pad[0] == 0);
ASSERT(zio->io_bp->blk_pad[1] == 0);
ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy,
sizeof (blkptr_t)) == 0 ||
(zio->io_bp == zio_unique_parent(zio)->io_bp));
if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
zio->io_bp_override == NULL &&
!(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
ASSERT(!BP_SHOULD_BYTESWAP(zio->io_bp));
ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
ASSERT3U(zio->io_prop.zp_copies, <=,
BP_GET_NDVAS(zio->io_bp));
ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
(BP_COUNT_GANG(zio->io_bp) == BP_GET_NDVAS(zio->io_bp)));
(BP_COUNT_GANG(zio->io_bp) ==
BP_GET_NDVAS(zio->io_bp)));
}
if (zio->io_flags & ZIO_FLAG_NOPWRITE)
VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
@ -3030,7 +3033,7 @@ zio_done(zio_t *zio)
if (asize != zio->io_size) {
abuf = zio_buf_alloc(asize);
bcopy(zio->io_data, abuf, zio->io_size);
bzero(abuf + zio->io_size, asize - zio->io_size);
bzero(abuf+zio->io_size, asize-zio->io_size);
}

zio->io_cksum_report = zcr->zcr_next;
@ -3078,8 +3081,8 @@ zio_done(zio_t *zio)
* error and generate a logical data ereport.
*/
spa_log_error(zio->io_spa, zio);
zfs_ereport_post(FM_EREPORT_ZFS_DATA, zio->io_spa, NULL, zio,
0, 0);
zfs_ereport_post(FM_EREPORT_ZFS_DATA, zio->io_spa,
NULL, zio, 0, 0);
}
}

@ -3355,13 +3358,13 @@ MODULE_PARM_DESC(zio_requeue_io_start_cut_in_line, "Prioritize requeued I/O");

module_param(zfs_sync_pass_deferred_free, int, 0644);
MODULE_PARM_DESC(zfs_sync_pass_deferred_free,
"defer frees starting in this pass");
"Defer frees starting in this pass");

module_param(zfs_sync_pass_dont_compress, int, 0644);
MODULE_PARM_DESC(zfs_sync_pass_dont_compress,
"don't compress starting in this pass");
"Don't compress starting in this pass");

module_param(zfs_sync_pass_rewrite, int, 0644);
MODULE_PARM_DESC(zfs_sync_pass_rewrite,
"rewrite new bps starting in this pass");
"Rewrite new bps starting in this pass");
#endif

@ -43,7 +43,7 @@ zpl_common_open(struct inode *ip, struct file *filp)
if (filp->f_mode & FMODE_WRITE)
return (-EACCES);

return generic_file_open(ip, filp);
return (generic_file_open(ip, filp));
}

/*
@ -129,12 +129,12 @@ zpl_root_lookup(struct inode *dip, struct dentry *dentry, unsigned int flags)

if (error) {
if (error == -ENOENT)
return d_splice_alias(NULL, dentry);
return (d_splice_alias(NULL, dentry));
else
return ERR_PTR(error);
return (ERR_PTR(error));
}

return d_splice_alias(ip, dentry);
return (d_splice_alias(ip, dentry));
}

/*
@ -174,7 +174,7 @@ zpl_snapdir_automount(struct path *path)
error = -zfsctl_mount_snapshot(path, 0);
dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
if (error)
return ERR_PTR(error);
return (ERR_PTR(error));

/*
* Rather than returning the new vfsmount for the snapshot we must
@ -198,7 +198,7 @@ zpl_snapdir_revalidate(struct dentry *dentry, struct nameidata *i)
zpl_snapdir_revalidate(struct dentry *dentry, unsigned int flags)
#endif
{
return 0;
return (0);
}

dentry_operations_t zpl_dops_snapdirs = {
@ -237,13 +237,13 @@ zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry,
crfree(cr);

if (error && error != -ENOENT)
return ERR_PTR(error);
return (ERR_PTR(error));

ASSERT(error == 0 || ip == NULL);
d_clear_d_op(dentry);
d_set_d_op(dentry, &zpl_dops_snapdirs);

return d_splice_alias(ip, dentry);
return (d_splice_alias(ip, dentry));
}

static int
@ -334,7 +334,7 @@ zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, zpl_umode_t mode)
int error;

crhold(cr);
vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
zpl_vap_init(vap, dip, mode | S_IFDIR, cr);

error = -zfsctl_snapdir_mkdir(dip, dname(dentry), vap, &ip, cr, 0);
@ -344,7 +344,7 @@ zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, zpl_umode_t mode)
d_instantiate(dentry, ip);
}

kmem_free(vap, sizeof(vattr_t));
kmem_free(vap, sizeof (vattr_t));
ASSERT3S(error, <=, 0);
crfree(cr);

@ -423,12 +423,12 @@ zpl_shares_lookup(struct inode *dip, struct dentry *dentry,

if (error) {
if (error == -ENOENT)
return d_splice_alias(NULL, dentry);
return (d_splice_alias(NULL, dentry));
else
return ERR_PTR(error);
return (ERR_PTR(error));
}

return d_splice_alias(ip, dentry);
return (d_splice_alias(ip, dentry));
}

static int

@ -45,7 +45,7 @@ zpl_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len, int connectable)
|
||||
len_bytes = *max_len * sizeof (__u32);
|
||||
|
||||
if (len_bytes < offsetof(fid_t, fid_data))
|
||||
return 255;
|
||||
return (255);
|
||||
|
||||
fid->fid_len = len_bytes - offsetof(fid_t, fid_data);
|
||||
|
||||
@ -76,7 +76,7 @@ zpl_dentry_obtain_alias(struct inode *ip)
|
||||
}
|
||||
#endif /* HAVE_D_OBTAIN_ALIAS */
|
||||
|
||||
return result;
|
||||
return (result);
|
||||
}
|
||||
|
||||
static struct dentry *
|
||||
@ -92,16 +92,16 @@ zpl_fh_to_dentry(struct super_block *sb, struct fid *fh,
|
||||
if (fh_type != FILEID_INO32_GEN ||
|
||||
len_bytes < offsetof(fid_t, fid_data) ||
|
||||
len_bytes < offsetof(fid_t, fid_data) + fid->fid_len)
|
||||
return ERR_PTR(-EINVAL);
|
||||
return (ERR_PTR(-EINVAL));
|
||||
|
||||
rc = zfs_vget(sb, &ip, fid);
|
||||
|
||||
if (rc != 0)
|
||||
return ERR_PTR(-rc);
|
||||
return (ERR_PTR(-rc));
|
||||
|
||||
ASSERT((ip != NULL) && !IS_ERR(ip));
|
||||
|
||||
return zpl_dentry_obtain_alias(ip);
|
||||
return (zpl_dentry_obtain_alias(ip));
|
||||
}
|
||||
|
||||
static struct dentry *
|
||||
@ -117,9 +117,9 @@ zpl_get_parent(struct dentry *child)
|
||||
ASSERT3S(error, <=, 0);
|
||||
|
||||
if (error)
|
||||
return ERR_PTR(error);
|
||||
return (ERR_PTR(error));
|
||||
|
||||
return zpl_dentry_obtain_alias(ip);
|
||||
return (zpl_dentry_obtain_alias(ip));
|
||||
}
|
||||
|
||||
#ifdef HAVE_COMMIT_METADATA
|
||||
@ -134,7 +134,7 @@ zpl_commit_metadata(struct inode *inode)
|
||||
crfree(cr);
|
||||
ASSERT3S(error, <=, 0);
|
||||
|
||||
return error;
|
||||
return (error);
|
||||
}
|
||||
#endif /* HAVE_COMMIT_METADATA */
|
||||
|
||||
@ -143,6 +143,6 @@ const struct export_operations zpl_export_operations = {
|
||||
.fh_to_dentry = zpl_fh_to_dentry,
|
||||
.get_parent = zpl_get_parent,
|
||||
#ifdef HAVE_COMMIT_METADATA
|
||||
.commit_metadata= zpl_commit_metadata,
|
||||
.commit_metadata = zpl_commit_metadata,
|
||||
#endif /* HAVE_COMMIT_METADATA */
|
||||
};
|
||||
|
@ -280,7 +280,7 @@ zpl_llseek(struct file *filp, loff_t offset, int whence)
|
||||
}
|
||||
#endif /* SEEK_HOLE && SEEK_DATA */
|
||||
|
||||
return generic_file_llseek(filp, offset, whence);
|
||||
return (generic_file_llseek(filp, offset, whence));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -381,7 +381,7 @@ zpl_readpage(struct file *filp, struct page *pp)
|
||||
}
|
||||
|
||||
unlock_page(pp);
|
||||
return error;
|
||||
return (error);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -536,7 +536,7 @@ zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
static long
|
||||
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
return zpl_ioctl(filp, cmd, arg);
|
||||
return (zpl_ioctl(filp, cmd, arg));
|
||||
}
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
|
@ -42,7 +42,7 @@ zpl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
|
||||
int error;
|
||||
|
||||
if (dlen(dentry) > ZFS_MAXNAMELEN)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
return (ERR_PTR(-ENAMETOOLONG));
|
||||
|
||||
crhold(cr);
|
||||
error = -zfs_lookup(dir, dname(dentry), &ip, 0, cr, NULL, NULL);
|
||||
@ -58,12 +58,12 @@ zpl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
|
||||
|
||||
if (error) {
|
||||
if (error == -ENOENT)
|
||||
return d_splice_alias(NULL, dentry);
|
||||
return (d_splice_alias(NULL, dentry));
|
||||
else
|
||||
return ERR_PTR(error);
|
||||
return (ERR_PTR(error));
|
||||
}
|
||||
|
||||
return d_splice_alias(ip, dentry);
|
||||
return (d_splice_alias(ip, dentry));
|
||||
}
|
||||
|
||||
void
|
||||
@ -97,7 +97,7 @@ zpl_create(struct inode *dir, struct dentry *dentry, zpl_umode_t mode,
|
||||
int error;
|
||||
|
||||
crhold(cr);
|
||||
vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP);
|
||||
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
|
||||
zpl_vap_init(vap, dir, mode, cr);
|
||||
|
||||
error = -zfs_create(dir, dname(dentry), vap, 0, mode, &ip, cr, 0, NULL);
|
||||
@ -107,7 +107,7 @@ zpl_create(struct inode *dir, struct dentry *dentry, zpl_umode_t mode,
|
||||
d_instantiate(dentry, ip);
|
||||
}
|
||||
|
||||
kmem_free(vap, sizeof(vattr_t));
|
||||
kmem_free(vap, sizeof (vattr_t));
|
||||
crfree(cr);
|
||||
ASSERT3S(error, <=, 0);
|
||||
|
||||
@ -131,7 +131,7 @@ zpl_mknod(struct inode *dir, struct dentry *dentry, zpl_umode_t mode,
|
||||
ASSERT(rdev == 0);
|
||||
|
||||
crhold(cr);
|
||||
vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP);
|
||||
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
|
||||
zpl_vap_init(vap, dir, mode, cr);
|
||||
vap->va_rdev = rdev;
|
||||
|
||||
@ -142,7 +142,7 @@ zpl_mknod(struct inode *dir, struct dentry *dentry, zpl_umode_t mode,
|
||||
d_instantiate(dentry, ip);
|
||||
}
|
||||
|
||||
kmem_free(vap, sizeof(vattr_t));
|
||||
kmem_free(vap, sizeof (vattr_t));
|
||||
crfree(cr);
|
||||
ASSERT3S(error, <=, 0);
|
||||
|
||||
@ -172,7 +172,7 @@ zpl_mkdir(struct inode *dir, struct dentry *dentry, zpl_umode_t mode)
|
||||
int error;
|
||||
|
||||
crhold(cr);
|
||||
vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP);
|
||||
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
|
||||
zpl_vap_init(vap, dir, mode | S_IFDIR, cr);
|
||||
|
||||
error = -zfs_mkdir(dir, dname(dentry), vap, &ip, cr, 0, NULL);
|
||||
@ -182,7 +182,7 @@ zpl_mkdir(struct inode *dir, struct dentry *dentry, zpl_umode_t mode)
|
||||
d_instantiate(dentry, ip);
|
||||
}
|
||||
|
||||
kmem_free(vap, sizeof(vattr_t));
|
||||
kmem_free(vap, sizeof (vattr_t));
|
||||
crfree(cr);
|
||||
ASSERT3S(error, <=, 0);
|
||||
|
||||
@ -239,7 +239,7 @@ zpl_setattr(struct dentry *dentry, struct iattr *ia)
|
||||
return (error);
|
||||
|
||||
crhold(cr);
|
||||
vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP);
|
||||
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
|
||||
vap->va_mask = ia->ia_valid & ATTR_IATTR_MASK;
|
||||
vap->va_mode = ia->ia_mode;
|
||||
vap->va_uid = KUID_TO_SUID(ia->ia_uid);
|
||||
@ -253,7 +253,7 @@ zpl_setattr(struct dentry *dentry, struct iattr *ia)
|
||||
if (!error && (ia->ia_valid & ATTR_MODE))
|
||||
error = zpl_chmod_acl(ip);
|
||||
|
||||
kmem_free(vap, sizeof(vattr_t));
|
||||
kmem_free(vap, sizeof (vattr_t));
|
||||
crfree(cr);
|
||||
ASSERT3S(error, <=, 0);
|
||||
|
||||
@ -284,7 +284,7 @@ zpl_symlink(struct inode *dir, struct dentry *dentry, const char *name)
|
||||
int error;
|
||||
|
||||
crhold(cr);
|
||||
vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP);
|
||||
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
|
||||
zpl_vap_init(vap, dir, S_IFLNK | S_IRWXUGO, cr);
|
||||
|
||||
error = -zfs_symlink(dir, dname(dentry), vap, (char *)name, &ip, cr, 0);
|
||||
@ -293,7 +293,7 @@ zpl_symlink(struct inode *dir, struct dentry *dentry, const char *name)
|
||||
d_instantiate(dentry, ip);
|
||||
}
|
||||
|
||||
kmem_free(vap, sizeof(vattr_t));
|
||||
kmem_free(vap, sizeof (vattr_t));
|
||||
crfree(cr);
|
||||
ASSERT3S(error, <=, 0);
|
||||
|
||||
@ -349,7 +349,7 @@ zpl_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
|
||||
int error;
|
||||
|
||||
if (ip->i_nlink >= ZFS_LINK_MAX)
|
||||
return -EMLINK;
|
||||
return (-EMLINK);
|
||||
|
||||
crhold(cr);
|
||||
ip->i_ctime = CURRENT_TIME_SEC;
|
||||
@ -371,7 +371,7 @@ out:
|
||||
|
||||
#ifdef HAVE_INODE_TRUNCATE_RANGE
|
||||
static void
|
||||
zpl_truncate_range(struct inode* ip, loff_t start, loff_t end)
|
||||
zpl_truncate_range(struct inode *ip, loff_t start, loff_t end)
|
||||
{
|
||||
cred_t *cr = CRED();
|
||||
flock64_t bf;
|
||||
@ -402,7 +402,7 @@ zpl_truncate_range(struct inode* ip, loff_t start, loff_t end)
|
||||
static long
|
||||
zpl_fallocate(struct inode *ip, int mode, loff_t offset, loff_t len)
|
||||
{
|
||||
return zpl_fallocate_common(ip, mode, offset, len);
|
||||
return (zpl_fallocate_common(ip, mode, offset, len));
|
||||
}
|
||||
#endif /* HAVE_INODE_FALLOCATE */
|
||||
|
||||
|
@ -216,13 +216,13 @@ __zpl_show_options(struct seq_file *seq, zfs_sb_t *zsb)
|
||||
static int
|
||||
zpl_show_options(struct seq_file *seq, struct dentry *root)
|
||||
{
|
||||
return __zpl_show_options(seq, root->d_sb->s_fs_info);
|
||||
return (__zpl_show_options(seq, root->d_sb->s_fs_info));
|
||||
}
|
||||
#else
|
||||
static int
|
||||
zpl_show_options(struct seq_file *seq, struct vfsmount *vfsp)
|
||||
{
|
||||
return __zpl_show_options(seq, vfsp->mnt_sb->s_fs_info);
|
||||
return (__zpl_show_options(seq, vfsp->mnt_sb->s_fs_info));
|
||||
}
|
||||
#endif /* HAVE_SHOW_OPTIONS_WITH_DENTRY */
|
||||
|
||||
@ -244,7 +244,7 @@ zpl_mount(struct file_system_type *fs_type, int flags,
|
||||
{
|
||||
zpl_mount_data_t zmd = { osname, data };
|
||||
|
||||
return mount_nodev(fs_type, flags, &zmd, zpl_fill_super);
|
||||
return (mount_nodev(fs_type, flags, &zmd, zpl_fill_super));
|
||||
}
|
||||
#else
|
||||
static int
|
||||
@ -253,7 +253,7 @@ zpl_get_sb(struct file_system_type *fs_type, int flags,
|
||||
{
|
||||
zpl_mount_data_t zmd = { osname, data };
|
||||
|
||||
return get_sb_nodev(fs_type, flags, &zmd, zpl_fill_super, mnt);
|
||||
return (get_sb_nodev(fs_type, flags, &zmd, zpl_fill_super, mnt));
|
||||
}
|
||||
#endif /* HAVE_MOUNT_NODEV */
|
||||
|
||||
@ -287,14 +287,12 @@ zpl_prune_sb(struct super_block *sb, void *arg)
|
||||
|
||||
error = -zfs_sb_prune(sb, *(unsigned long *)arg, &objects);
|
||||
ASSERT3S(error, <=, 0);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
zpl_prune_sbs(int64_t bytes_to_scan, void *private)
|
||||
{
|
||||
unsigned long nr_to_scan = (bytes_to_scan / sizeof(znode_t));
|
||||
unsigned long nr_to_scan = (bytes_to_scan / sizeof (znode_t));
|
||||
|
||||
iterate_supers_type(&zpl_fs_type, zpl_prune_sb, &nr_to_scan);
|
||||
kmem_reap();
|
||||
@ -311,7 +309,7 @@ zpl_prune_sbs(int64_t bytes_to_scan, void *private)
|
||||
void
|
||||
zpl_prune_sbs(int64_t bytes_to_scan, void *private)
|
||||
{
|
||||
unsigned long nr_to_scan = (bytes_to_scan / sizeof(znode_t));
|
||||
unsigned long nr_to_scan = (bytes_to_scan / sizeof (znode_t));
|
||||
|
||||
shrink_dcache_memory(nr_to_scan, GFP_KERNEL);
|
||||
shrink_icache_memory(nr_to_scan, GFP_KERNEL);
|
||||
@ -344,7 +342,7 @@ zpl_nr_cached_objects(struct super_block *sb)
|
||||
static void
|
||||
zpl_free_cached_objects(struct super_block *sb, int nr_to_scan)
|
||||
{
|
||||
arc_adjust_meta(nr_to_scan * sizeof(znode_t), B_FALSE);
|
||||
arc_adjust_meta(nr_to_scan * sizeof (znode_t), B_FALSE);
|
||||
}
|
||||
#endif /* HAVE_FREE_CACHED_OBJECTS */
|
||||
|
||||
|
@ -94,11 +94,11 @@ typedef struct xattr_filldir {
|
||||
static int
|
||||
zpl_xattr_filldir(xattr_filldir_t *xf, const char *name, int name_len)
|
||||
{
|
||||
if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
|
||||
if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) == 0)
|
||||
if (!(ITOZSB(xf->inode)->z_flags & ZSB_XATTR))
|
||||
return (0);
|
||||
|
||||
if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
|
||||
if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return (0);
|
||||
|
||||
@ -389,7 +389,7 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value,
|
||||
|
||||
/* Lookup failed create a new xattr. */
|
||||
if (xip == NULL) {
|
||||
vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP);
|
||||
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
|
||||
vap->va_mode = xattr_mode;
|
||||
vap->va_mask = ATTR_MODE;
|
||||
vap->va_uid = crgetfsuid(cr);
|
||||
@ -413,7 +413,7 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value,
|
||||
|
||||
out:
|
||||
if (vap)
|
||||
kmem_free(vap, sizeof(vattr_t));
|
||||
kmem_free(vap, sizeof (vattr_t));
|
||||
|
||||
if (xip)
|
||||
iput(xip);
|
||||
@ -534,10 +534,10 @@ __zpl_xattr_user_get(struct inode *ip, const char *name,
|
||||
int error;
|
||||
|
||||
if (strcmp(name, "") == 0)
|
||||
return -EINVAL;
|
||||
return (-EINVAL);
|
||||
|
||||
if (!(ITOZSB(ip)->z_flags & ZSB_XATTR))
|
||||
return -EOPNOTSUPP;
|
||||
return (-EOPNOTSUPP);
|
||||
|
||||
xattr_name = kmem_asprintf("%s%s", XATTR_USER_PREFIX, name);
|
||||
error = zpl_xattr_get(ip, xattr_name, value, size);
|
||||
@ -555,10 +555,10 @@ __zpl_xattr_user_set(struct inode *ip, const char *name,
|
||||
int error;
|
||||
|
||||
if (strcmp(name, "") == 0)
|
||||
return -EINVAL;
|
||||
return (-EINVAL);
|
||||
|
||||
if (!(ITOZSB(ip)->z_flags & ZSB_XATTR))
|
||||
return -EOPNOTSUPP;
|
||||
return (-EOPNOTSUPP);
|
||||
|
||||
xattr_name = kmem_asprintf("%s%s", XATTR_USER_PREFIX, name);
|
||||
error = zpl_xattr_set(ip, xattr_name, value, size, flags);
|
||||
@ -582,10 +582,10 @@ __zpl_xattr_trusted_get(struct inode *ip, const char *name,
|
||||
int error;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
return (-EACCES);
|
||||
|
||||
if (strcmp(name, "") == 0)
|
||||
return -EINVAL;
|
||||
return (-EINVAL);
|
||||
|
||||
xattr_name = kmem_asprintf("%s%s", XATTR_TRUSTED_PREFIX, name);
|
||||
error = zpl_xattr_get(ip, xattr_name, value, size);
|
||||
@ -603,10 +603,10 @@ __zpl_xattr_trusted_set(struct inode *ip, const char *name,
|
||||
int error;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
return (-EACCES);
|
||||
|
||||
if (strcmp(name, "") == 0)
|
||||
return -EINVAL;
|
||||
return (-EINVAL);
|
||||
|
||||
xattr_name = kmem_asprintf("%s%s", XATTR_TRUSTED_PREFIX, name);
|
||||
error = zpl_xattr_set(ip, xattr_name, value, size, flags);
|
||||
@ -630,7 +630,7 @@ __zpl_xattr_security_get(struct inode *ip, const char *name,
|
||||
int error;
|
||||
|
||||
if (strcmp(name, "") == 0)
|
||||
return -EINVAL;
|
||||
return (-EINVAL);
|
||||
|
||||
xattr_name = kmem_asprintf("%s%s", XATTR_SECURITY_PREFIX, name);
|
||||
error = zpl_xattr_get(ip, xattr_name, value, size);
|
||||
@ -648,7 +648,7 @@ __zpl_xattr_security_set(struct inode *ip, const char *name,
|
||||
int error;
|
||||
|
||||
if (strcmp(name, "") == 0)
|
||||
return -EINVAL;
|
||||
return (-EINVAL);
|
||||
|
||||
xattr_name = kmem_asprintf("%s%s", XATTR_SECURITY_PREFIX, name);
|
||||
error = zpl_xattr_set(ip, xattr_name, value, size, flags);
|
||||
@ -699,7 +699,8 @@ zpl_xattr_security_init(struct inode *ip, struct inode *dip,
|
||||
&name, &value, &len);
|
||||
if (error) {
|
||||
if (error == -EOPNOTSUPP)
|
||||
return 0;
|
||||
return (0);
|
||||
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -731,7 +732,7 @@ zpl_set_acl(struct inode *ip, int type, struct posix_acl *acl)
|
||||
if (S_ISLNK(ip->i_mode))
|
||||
return (-EOPNOTSUPP);
|
||||
|
||||
switch(type) {
|
||||
switch (type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
name = POSIX_ACL_XATTR_ACCESS;
|
||||
if (acl) {
|
||||
@ -816,7 +817,7 @@ zpl_get_acl(struct inode *ip, int type)
|
||||
name = POSIX_ACL_XATTR_DEFAULT;
|
||||
break;
|
||||
default:
|
||||
return ERR_PTR(-EINVAL);
|
||||
return (ERR_PTR(-EINVAL));
|
||||
}
|
||||
|
||||
size = zpl_xattr_get(ip, name, NULL, 0);
|
||||
@ -866,25 +867,25 @@ __zpl_check_acl(struct inode *ip, int mask)
|
||||
int
|
||||
zpl_check_acl(struct inode *ip, int mask, unsigned int flags)
|
||||
{
|
||||
return __zpl_check_acl(ip, mask);
|
||||
return (__zpl_check_acl(ip, mask));
|
||||
}
|
||||
#elif defined(HAVE_CHECK_ACL)
|
||||
int
|
||||
zpl_check_acl(struct inode *ip, int mask)
|
||||
{
|
||||
return __zpl_check_acl(ip , mask);
|
||||
return (__zpl_check_acl(ip, mask));
|
||||
}
|
||||
#elif defined(HAVE_PERMISSION_WITH_NAMEIDATA)
|
||||
int
|
||||
zpl_permission(struct inode *ip, int mask, struct nameidata *nd)
|
||||
{
|
||||
return generic_permission(ip, mask, __zpl_check_acl);
|
||||
return (generic_permission(ip, mask, __zpl_check_acl));
|
||||
}
|
||||
#elif defined(HAVE_PERMISSION)
|
||||
int
|
||||
zpl_permission(struct inode *ip, int mask)
|
||||
{
|
||||
return generic_permission(ip, mask, __zpl_check_acl);
|
||||
return (generic_permission(ip, mask, __zpl_check_acl));
|
||||
}
|
||||
#endif /* HAVE_CHECK_ACL | HAVE_PERMISSION */
|
||||
#endif /* !HAVE_GET_ACL */
|
||||
@ -923,7 +924,7 @@ zpl_init_acl(struct inode *ip, struct inode *dir)
|
||||
}
|
||||
|
||||
mode = ip->i_mode;
|
||||
error = posix_acl_create(&acl,GFP_KERNEL, &mode);
|
||||
error = posix_acl_create(&acl, GFP_KERNEL, &mode);
|
||||
if (error >= 0) {
|
||||
ip->i_mode = mode;
|
||||
mark_inode_dirty(ip);
|
||||
@ -953,9 +954,9 @@ zpl_chmod_acl(struct inode *ip)
|
||||
if (IS_ERR(acl) || !acl)
|
||||
return (PTR_ERR(acl));
|
||||
|
||||
error = posix_acl_chmod(&acl,GFP_KERNEL, ip->i_mode);
|
||||
error = posix_acl_chmod(&acl, GFP_KERNEL, ip->i_mode);
|
||||
if (!error)
|
||||
error = zpl_set_acl(ip,ACL_TYPE_ACCESS, acl);
|
||||
error = zpl_set_acl(ip, ACL_TYPE_ACCESS, acl);
|
||||
|
||||
zpl_posix_acl_release(acl);
|
||||
|
||||
@ -975,11 +976,11 @@ zpl_xattr_acl_list(struct inode *ip, char *list, size_t list_size,
|
||||
switch (type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
xattr_name = POSIX_ACL_XATTR_ACCESS;
|
||||
xattr_size = sizeof(xattr_name);
|
||||
xattr_size = sizeof (xattr_name);
|
||||
break;
|
||||
case ACL_TYPE_DEFAULT:
|
||||
xattr_name = POSIX_ACL_XATTR_DEFAULT;
|
||||
xattr_size = sizeof(xattr_name);
|
||||
xattr_size = sizeof (xattr_name);
|
||||
break;
|
||||
default:
|
||||
return (0);
|
||||
@ -1060,7 +1061,7 @@ zpl_xattr_acl_get_access(struct dentry *dentry, const char *name,
|
||||
void *buffer, size_t size, int type)
|
||||
{
|
||||
ASSERT3S(type, ==, ACL_TYPE_ACCESS);
|
||||
return zpl_xattr_acl_get(dentry->d_inode, name, buffer, size, type);
|
||||
return (zpl_xattr_acl_get(dentry->d_inode, name, buffer, size, type));
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1068,7 +1069,7 @@ zpl_xattr_acl_get_default(struct dentry *dentry, const char *name,
|
||||
void *buffer, size_t size, int type)
|
||||
{
|
||||
ASSERT3S(type, ==, ACL_TYPE_DEFAULT);
|
||||
return zpl_xattr_acl_get(dentry->d_inode, name, buffer, size, type);
|
||||
return (zpl_xattr_acl_get(dentry->d_inode, name, buffer, size, type));
|
||||
}
|
||||
|
||||
#else
|
||||
@ -1077,14 +1078,14 @@ static int
|
||||
zpl_xattr_acl_get_access(struct inode *ip, const char *name,
|
||||
void *buffer, size_t size)
|
||||
{
|
||||
return zpl_xattr_acl_get(ip, name, buffer, size, ACL_TYPE_ACCESS);
|
||||
return (zpl_xattr_acl_get(ip, name, buffer, size, ACL_TYPE_ACCESS));
|
||||
}
|
||||
|
||||
static int
|
||||
zpl_xattr_acl_get_default(struct inode *ip, const char *name,
|
||||
void *buffer, size_t size)
|
||||
{
|
||||
return zpl_xattr_acl_get(ip, name, buffer, size, ACL_TYPE_DEFAULT);
|
||||
return (zpl_xattr_acl_get(ip, name, buffer, size, ACL_TYPE_DEFAULT));
|
||||
}
|
||||
#endif /* HAVE_DENTRY_XATTR_GET */
|
||||
|
||||
@ -1131,13 +1132,13 @@ zpl_xattr_acl_set_access(struct dentry *dentry, const char *name,
|
||||
const void *value, size_t size, int flags, int type)
|
||||
{
|
||||
ASSERT3S(type, ==, ACL_TYPE_ACCESS);
|
||||
return zpl_xattr_acl_set(dentry->d_inode,
|
||||
name, value, size, flags, type);
|
||||
return (zpl_xattr_acl_set(dentry->d_inode,
|
||||
name, value, size, flags, type));
|
||||
}
|
||||
|
||||
static int
|
||||
zpl_xattr_acl_set_default(struct dentry *dentry, const char *name,
|
||||
const void *value, size_t size,int flags, int type)
|
||||
const void *value, size_t size, int flags, int type)
|
||||
{
|
||||
ASSERT3S(type, ==, ACL_TYPE_DEFAULT);
|
||||
return zpl_xattr_acl_set(dentry->d_inode,
|
||||
|
@ -1,4 +1,4 @@
|
||||
/*****************************************************************************\
|
||||
/*
|
||||
* ZPIOS is a heavily modified version of the original PIOS test code.
|
||||
* It is designed to have the test code running in the Linux kernel
|
||||
* against ZFS while still being flexibly controled from user space.
|
||||
@ -29,7 +29,7 @@
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with ZPIOS. If not, see <http://www.gnu.org/licenses/>.
|
||||
\*****************************************************************************/
|
||||
*/
|
||||
|
||||
#include <sys/zfs_context.h>
|
||||
#include <sys/dmu.h>
|
||||
@ -43,10 +43,11 @@ static spl_class *zpios_class;
|
||||
static spl_device *zpios_device;
|
||||
static char *zpios_tag = "zpios_tag";
|
||||
|
||||
static
|
||||
int zpios_upcall(char *path, char *phase, run_args_t *run_args, int rc)
|
||||
static int
|
||||
zpios_upcall(char *path, char *phase, run_args_t *run_args, int rc)
|
||||
{
|
||||
/* This is stack heavy but it should be OK since we are only
|
||||
/*
|
||||
* This is stack heavy but it should be OK since we are only
|
||||
* making the upcall between tests when the stack is shallow.
|
||||
*/
|
||||
char id[16], chunk_size[16], region_size[16], thread_count[16];
|
||||
@ -55,11 +56,11 @@ int zpios_upcall(char *path, char *phase, run_args_t *run_args, int rc)
|
||||
char *argv[16], *envp[4];
|
||||
|
||||
if ((path == NULL) || (strlen(path) == 0))
|
||||
return -ENOENT;
|
||||
return (-ENOENT);
|
||||
|
||||
snprintf(id, 15, "%d", run_args->id);
|
||||
snprintf(chunk_size, 15, "%lu", (long unsigned)run_args->chunk_size);
|
||||
snprintf(region_size, 15, "%lu",(long unsigned) run_args->region_size);
|
||||
snprintf(region_size, 15, "%lu", (long unsigned) run_args->region_size);
|
||||
snprintf(thread_count, 15, "%u", run_args->thread_count);
|
||||
snprintf(region_count, 15, "%u", run_args->region_count);
|
||||
snprintf(offset, 15, "%lu", (long unsigned)run_args->offset);
|
||||
@ -93,7 +94,36 @@ int zpios_upcall(char *path, char *phase, run_args_t *run_args, int rc)
|
||||
envp[2] = "PATH=/sbin:/usr/sbin:/bin:/usr/bin";
|
||||
envp[3] = NULL;
|
||||
|
||||
return call_usermodehelper(path, argv, envp, UMH_WAIT_PROC);
|
||||
return (call_usermodehelper(path, argv, envp, UMH_WAIT_PROC));
|
||||
}
|
||||
|
||||
static int
|
||||
zpios_print(struct file *file, const char *format, ...)
|
||||
{
|
||||
zpios_info_t *info = (zpios_info_t *)file->private_data;
|
||||
va_list adx;
|
||||
int rc;
|
||||
|
||||
ASSERT(info);
|
||||
ASSERT(info->info_buffer);
|
||||
|
||||
va_start(adx, format);
|
||||
spin_lock(&info->info_lock);
|
||||
|
||||
/* Don't allow the kernel to start a write in the red zone */
|
||||
if ((int)(info->info_head - info->info_buffer) >
|
||||
(info->info_size - ZPIOS_INFO_BUFFER_REDZONE)) {
|
||||
rc = -EOVERFLOW;
|
||||
} else {
|
||||
rc = vsprintf(info->info_head, format, adx);
|
||||
if (rc >= 0)
|
||||
info->info_head += rc;
|
||||
}
|
||||
|
||||
spin_unlock(&info->info_lock);
|
||||
va_end(adx);
|
||||
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static uint64_t
|
||||
@ -110,22 +140,21 @@ zpios_dmu_object_create(run_args_t *run_args, objset_t *os)
|
||||
zpios_print(run_args->file,
|
||||
"dmu_tx_assign() failed: %d\n", rc);
|
||||
dmu_tx_abort(tx);
|
||||
return obj;
|
||||
return (obj);
|
||||
}
|
||||
|
||||
obj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
|
||||
DMU_OT_NONE, 0, tx);
|
||||
obj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0, DMU_OT_NONE, 0, tx);
|
||||
rc = dmu_object_set_blocksize(os, obj, 128ULL << 10, 0, tx);
|
||||
if (rc) {
|
||||
zpios_print(run_args->file,
|
||||
"dmu_object_set_blocksize() failed: %d\n", rc);
|
||||
dmu_tx_abort(tx);
|
||||
return obj;
|
||||
return (obj);
|
||||
}
|
||||
|
||||
dmu_tx_commit(tx);
|
||||
|
||||
return obj;
|
||||
return (obj);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -141,7 +170,7 @@ zpios_dmu_object_free(run_args_t *run_args, objset_t *os, uint64_t obj)
|
||||
zpios_print(run_args->file,
|
||||
"dmu_tx_assign() failed: %d\n", rc);
|
||||
dmu_tx_abort(tx);
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
rc = dmu_object_free(os, obj, tx);
|
||||
@ -149,12 +178,12 @@ zpios_dmu_object_free(run_args_t *run_args, objset_t *os, uint64_t obj)
|
||||
zpios_print(run_args->file,
|
||||
"dmu_object_free() failed: %d\n", rc);
|
||||
dmu_tx_abort(tx);
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
dmu_tx_commit(tx);
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -166,10 +195,10 @@ zpios_dmu_setup(run_args_t *run_args)
|
||||
uint64_t obj = 0ULL;
|
||||
int i, rc = 0, rc2;
|
||||
|
||||
(void)zpios_upcall(run_args->pre, PHASE_PRE_CREATE, run_args, 0);
|
||||
(void) zpios_upcall(run_args->pre, PHASE_PRE_CREATE, run_args, 0);
|
||||
t->start = zpios_timespec_now();
|
||||
|
||||
(void)snprintf(name, 32, "%s/id_%d", run_args->pool, run_args->id);
|
||||
(void) snprintf(name, 32, "%s/id_%d", run_args->pool, run_args->id);
|
||||
rc = dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL);
|
||||
if (rc) {
|
||||
zpios_print(run_args->file, "Error dmu_objset_create(%s, ...) "
|
||||
@ -233,9 +262,9 @@ out_destroy:
|
||||
out:
|
||||
t->stop = zpios_timespec_now();
|
||||
t->delta = zpios_timespec_sub(t->stop, t->start);
|
||||
(void)zpios_upcall(run_args->post, PHASE_POST_CREATE, run_args, rc);
|
||||
(void) zpios_upcall(run_args->post, PHASE_POST_CREATE, run_args, rc);
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -244,13 +273,13 @@ zpios_setup_run(run_args_t **run_args, zpios_cmd_t *kcmd, struct file *file)
|
||||
run_args_t *ra;
|
||||
int rc, size;
|
||||
|
||||
size = sizeof(*ra) + kcmd->cmd_region_count * sizeof(zpios_region_t);
|
||||
size = sizeof (*ra) + kcmd->cmd_region_count * sizeof (zpios_region_t);
|
||||
|
||||
ra = vmem_zalloc(size, KM_SLEEP);
|
||||
if (ra == NULL) {
|
||||
zpios_print(file, "Unable to vmem_zalloc() %d bytes "
|
||||
"for regions\n", size);
|
||||
return -ENOMEM;
|
||||
return (-ENOMEM);
|
||||
}
|
||||
|
||||
*run_args = ra;
|
||||
@ -277,7 +306,7 @@ zpios_setup_run(run_args_t **run_args, zpios_cmd_t *kcmd, struct file *file)
|
||||
mutex_init(&ra->lock_work, NULL, MUTEX_DEFAULT, NULL);
|
||||
mutex_init(&ra->lock_ctl, NULL, MUTEX_DEFAULT, NULL);
|
||||
|
||||
(void)zpios_upcall(ra->pre, PHASE_PRE_RUN, ra, 0);
|
||||
(void) zpios_upcall(ra->pre, PHASE_PRE_RUN, ra, 0);
|
||||
|
||||
rc = zpios_dmu_setup(ra);
|
||||
if (rc) {
|
||||
@ -287,7 +316,7 @@ zpios_setup_run(run_args_t **run_args, zpios_cmd_t *kcmd, struct file *file)
|
||||
*run_args = NULL;
|
||||
}
|
||||
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -297,12 +326,13 @@ zpios_get_work_item(run_args_t *run_args, dmu_obj_t *obj, __u64 *offset,
|
||||
int i, j, count = 0;
|
||||
unsigned int random_int;
|
||||
|
||||
get_random_bytes(&random_int, sizeof(unsigned int));
|
||||
get_random_bytes(&random_int, sizeof (unsigned int));
|
||||
|
||||
mutex_enter(&run_args->lock_work);
|
||||
i = run_args->region_next;
|
||||
|
||||
/* XXX: I don't much care for this chunk selection mechansim
|
||||
/*
|
||||
* XXX: I don't much care for this chunk selection mechansim
|
||||
* there's the potential to burn a lot of time here doing nothing
|
||||
* useful while holding the global lock. This could give some
|
||||
* misleading performance results. I'll fix it latter.
|
||||
@ -340,20 +370,21 @@ zpios_get_work_item(run_args_t *run_args, dmu_obj_t *obj, __u64 *offset,
|
||||
|
||||
/* update ctl structure */
|
||||
if (run_args->region_noise) {
|
||||
get_random_bytes(&random_int, sizeof(unsigned int));
|
||||
run_args->region_next += random_int % run_args->region_noise;
|
||||
get_random_bytes(&random_int, sizeof (unsigned int));
|
||||
run_args->region_next +=
|
||||
random_int % run_args->region_noise;
|
||||
} else {
|
||||
run_args->region_next++;
|
||||
}
|
||||
|
||||
mutex_exit(&run_args->lock_work);
|
||||
return 1;
|
||||
return (1);
|
||||
}
|
||||
|
||||
/* nothing left to do */
|
||||
mutex_exit(&run_args->lock_work);
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -364,31 +395,29 @@ zpios_remove_objset(run_args_t *run_args)
|
||||
char name[32];
|
||||
int rc = 0, i;
|
||||
|
||||
(void)zpios_upcall(run_args->pre, PHASE_PRE_REMOVE, run_args, 0);
|
||||
(void) zpios_upcall(run_args->pre, PHASE_PRE_REMOVE, run_args, 0);
|
||||
t->start = zpios_timespec_now();
|
||||
|
||||
(void)snprintf(name, 32, "%s/id_%d", run_args->pool, run_args->id);
|
||||
(void) snprintf(name, 32, "%s/id_%d", run_args->pool, run_args->id);
|
||||
|
||||
if (run_args->flags & DMU_REMOVE) {
|
||||
if (run_args->flags & DMU_FPP) {
|
||||
for (i = 0; i < run_args->region_count; i++) {
|
||||
region = &run_args->regions[i];
|
||||
rc = zpios_dmu_object_free(run_args,
|
||||
region->obj.os,
|
||||
region->obj.obj);
|
||||
region->obj.os, region->obj.obj);
|
||||
if (rc)
|
||||
zpios_print(run_args->file, "Error "
|
||||
"removing object %d, %d\n",
|
||||
zpios_print(run_args->file,
|
||||
"Error removing object %d, %d\n",
|
||||
(int)region->obj.obj, rc);
|
||||
}
|
||||
} else {
|
||||
region = &run_args->regions[0];
|
||||
rc = zpios_dmu_object_free(run_args,
|
||||
region->obj.os,
|
||||
region->obj.obj);
|
||||
region->obj.os, region->obj.obj);
|
||||
if (rc)
|
||||
zpios_print(run_args->file, "Error "
|
||||
"removing object %d, %d\n",
|
||||
zpios_print(run_args->file,
|
||||
"Error removing object %d, %d\n",
|
||||
(int)region->obj.obj, rc);
|
||||
}
|
||||
}
|
||||
@ -404,7 +433,7 @@ zpios_remove_objset(run_args_t *run_args)
|
||||
|
||||
t->stop = zpios_timespec_now();
|
||||
t->delta = zpios_timespec_sub(t->stop, t->start);
|
||||
(void)zpios_upcall(run_args->post, PHASE_POST_REMOVE, run_args, rc);
|
||||
(void) zpios_upcall(run_args->post, PHASE_POST_REMOVE, run_args, rc);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -420,12 +449,12 @@ zpios_cleanup_run(run_args_t *run_args)
|
||||
if (run_args->threads[i]) {
|
||||
mutex_destroy(&run_args->threads[i]->lock);
|
||||
kmem_free(run_args->threads[i],
|
||||
sizeof(thread_data_t));
|
||||
sizeof (thread_data_t));
|
||||
}
|
||||
}
|
||||
|
||||
kmem_free(run_args->threads,
|
||||
sizeof(thread_data_t *) * run_args->thread_count);
|
||||
sizeof (thread_data_t *) * run_args->thread_count);
|
||||
}
|
||||
|
||||
for (i = 0; i < run_args->region_count; i++)
|
||||
@ -433,9 +462,9 @@ zpios_cleanup_run(run_args_t *run_args)
|
||||
|
||||
mutex_destroy(&run_args->lock_work);
|
||||
mutex_destroy(&run_args->lock_ctl);
|
||||
size = run_args->region_count * sizeof(zpios_region_t);
|
||||
size = run_args->region_count * sizeof (zpios_region_t);
|
||||
|
||||
vmem_free(run_args, sizeof(*run_args) + size);
|
||||
vmem_free(run_args, sizeof (*run_args) + size);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -463,7 +492,7 @@ zpios_dmu_write(run_args_t *run_args, objset_t *os, uint64_t object,
|
||||
zpios_print(run_args->file,
|
||||
"Error in dmu_tx_assign(), %d", rc);
|
||||
dmu_tx_abort(tx);
|
||||
return rc;
|
||||
return (rc);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -474,7 +503,7 @@ zpios_dmu_write(run_args_t *run_args, objset_t *os, uint64_t object,
|
||||
dmu_write(os, object, offset, size, buf, tx);
|
||||
dmu_tx_commit(tx);
|
||||
|
||||
return 0;
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -489,7 +518,7 @@ zpios_dmu_read(run_args_t *run_args, objset_t *os, uint64_t object,
|
||||
if (run_args->flags & DMU_READ_NOPF)
|
||||
flags |= DMU_READ_NO_PREFETCH;
|
||||
|
||||
return dmu_read(os, object, offset, size, buf, flags);
|
||||
return (dmu_read(os, object, offset, size, buf, flags));
|
||||
}
|
||||
|
||||
static int
|
||||
@ -511,11 +540,12 @@ zpios_thread_main(void *data)
|
||||
int i, rc = 0;
|
||||
|
||||
if (chunk_noise) {
|
||||
get_random_bytes(&random_int, sizeof(unsigned int));
|
||||
get_random_bytes(&random_int, sizeof (unsigned int));
|
||||
chunk_noise_tmp = (random_int % (chunk_noise * 2))-chunk_noise;
|
||||
}
|
||||
|
||||
/* It's OK to vmem_alloc() this memory because it will be copied
|
||||
/*
|
||||
* It's OK to vmem_alloc() this memory because it will be copied
|
||||
* in to the slab and pointers to the slab copy will be setup in
|
||||
* the bio when the IO is submitted. This of course is not ideal
|
||||
* since we want a zero-copy IO path if possible. It would be nice
|
||||
@ -537,7 +567,7 @@ zpios_thread_main(void *data)
|
||||
while (zpios_get_work_item(run_args, &obj, &offset,
|
||||
&chunk_size, ®ion, DMU_WRITE)) {
|
||||
if (thread_delay) {
|
||||
get_random_bytes(&random_int, sizeof(unsigned int));
|
||||
get_random_bytes(&random_int, sizeof (unsigned int));
|
||||
thread_delay_tmp = random_int % thread_delay;
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
schedule_timeout(thread_delay_tmp); /* In jiffies */
|
||||
@ -603,7 +633,7 @@ zpios_thread_main(void *data)
|
||||
while (zpios_get_work_item(run_args, &obj, &offset,
|
||||
&chunk_size, ®ion, DMU_READ)) {
|
||||
if (thread_delay) {
|
||||
get_random_bytes(&random_int, sizeof(unsigned int));
|
||||
get_random_bytes(&random_int, sizeof (unsigned int));
|
||||
thread_delay_tmp = random_int % thread_delay;
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
schedule_timeout(thread_delay_tmp); /* In jiffies */
|
||||
@ -671,7 +701,7 @@ out:
|
||||
vmem_free(buf, chunk_size);
|
||||
do_exit(0);
|
||||
|
||||
return rc; /* Unreachable, due to do_exit() */
|
||||
return (rc); /* Unreachable, due to do_exit() */
|
||||
}
|
||||
|
||||
static int
|
||||
@ -691,13 +721,13 @@ zpios_threads_run(run_args_t *run_args)
|
||||
zpios_time_t *tr = &(run_args->stats.rd_time);
|
||||
int i, rc = 0, tc = run_args->thread_count;
|
||||
|
||||
tsks = kmem_zalloc(sizeof(struct task_struct *) * tc, KM_SLEEP);
|
||||
tsks = kmem_zalloc(sizeof (struct task_struct *) * tc, KM_SLEEP);
|
||||
if (tsks == NULL) {
|
||||
rc = -ENOMEM;
|
||||
goto cleanup2;
|
||||
}
|
||||
|
||||
run_args->threads = kmem_zalloc(sizeof(thread_data_t *) * tc, KM_SLEEP);
|
||||
run_args->threads = kmem_zalloc(sizeof (thread_data_t *)*tc, KM_SLEEP);
|
||||
if (run_args->threads == NULL) {
|
||||
rc = -ENOMEM;
|
||||
goto cleanup;
@ -708,7 +738,7 @@ zpios_threads_run(run_args_t *run_args)

        /* Create all the needed threads which will sleep until awoken */
        for (i = 0; i < tc; i++) {
                thr = kmem_zalloc(sizeof(thread_data_t), KM_SLEEP);
                thr = kmem_zalloc(sizeof (thread_data_t), KM_SLEEP);
                if (thr == NULL) {
                        rc = -ENOMEM;
                        goto taskerr;
@ -733,7 +763,7 @@ zpios_threads_run(run_args_t *run_args)
        tt->start = zpios_timespec_now();

        /* Wake up all threads for write phase */
        (void)zpios_upcall(run_args->pre, PHASE_PRE_WRITE, run_args, 0);
        (void) zpios_upcall(run_args->pre, PHASE_PRE_WRITE, run_args, 0);
        for (i = 0; i < tc; i++)
                wake_up_process(tsks[i]);

@ -741,7 +771,7 @@ zpios_threads_run(run_args_t *run_args)
        tw->start = zpios_timespec_now();
        wait_event(run_args->waitq, zpios_thread_done(run_args));
        tw->stop = zpios_timespec_now();
        (void)zpios_upcall(run_args->post, PHASE_POST_WRITE, run_args, rc);
        (void) zpios_upcall(run_args->post, PHASE_POST_WRITE, run_args, rc);

        for (i = 0; i < tc; i++) {
                thr = run_args->threads[i];
@ -774,7 +804,7 @@ zpios_threads_run(run_args_t *run_args)
        mutex_exit(&run_args->lock_ctl);

        /* Wake up all threads for read phase */
        (void)zpios_upcall(run_args->pre, PHASE_PRE_READ, run_args, 0);
        (void) zpios_upcall(run_args->pre, PHASE_PRE_READ, run_args, 0);
        for (i = 0; i < tc; i++)
                wake_up_process(tsks[i]);

@ -782,7 +812,7 @@ zpios_threads_run(run_args_t *run_args)
        tr->start = zpios_timespec_now();
        wait_event(run_args->waitq, zpios_thread_done(run_args));
        tr->stop = zpios_timespec_now();
        (void)zpios_upcall(run_args->post, PHASE_POST_READ, run_args, rc);
        (void) zpios_upcall(run_args->post, PHASE_POST_READ, run_args, rc);

        for (i = 0; i < tc; i++) {
                thr = run_args->threads[i];
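Both I/O phases in the hunks above share the same wake/wait shape: timestamp the phase, wake every worker thread, then block on a wait queue until all workers report completion. A self-contained sketch of that pattern is shown below; the wait queue, counter, and function names are invented for illustration (the real code tracks completion through run_args and zpios_thread_done()).

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static atomic_t example_done = ATOMIC_INIT(0);

/* Each worker calls this when it finishes the current phase */
static void
example_worker_finished(void)
{
        atomic_inc(&example_done);
        wake_up(&example_waitq);
}

/* The coordinator wakes tc workers and waits for all of them */
static void
example_run_phase(struct task_struct **tsks, int tc)
{
        int i;

        atomic_set(&example_done, 0);
        for (i = 0; i < tc; i++)
                wake_up_process(tsks[i]);

        wait_event(example_waitq, atomic_read(&example_done) == tc);
}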
@ -803,10 +833,10 @@ out:
        tr->delta = zpios_timespec_sub(tr->stop, tr->start);

cleanup:
        kmem_free(tsks, sizeof(struct task_struct *) * tc);
        kmem_free(tsks, sizeof (struct task_struct *) * tc);
cleanup2:
        /* Returns first encountered thread error (if any) */
        return rc;
        return (rc);

taskerr:
        /* Destroy all threads that were created successfully */
@ -829,14 +859,14 @@ zpios_do_one_run(struct file *file, zpios_cmd_t *kcmd,
            (!kcmd->cmd_thread_count) || (!kcmd->cmd_region_count)) {
                zpios_print(file, "Invalid chunk_size, region_size, "
                    "thread_count, or region_count, %d\n", -EINVAL);
                return -EINVAL;
                return (-EINVAL);
        }

        if (!(kcmd->cmd_flags & DMU_WRITE) ||
            !(kcmd->cmd_flags & DMU_READ)) {
                zpios_print(file, "Invalid flags, minimally DMU_WRITE "
                    "and DMU_READ must be set, %d\n", -EINVAL);
                return -EINVAL;
                return (-EINVAL);
        }

        if ((kcmd->cmd_flags & (DMU_WRITE_ZC | DMU_READ_ZC)) &&
@ -844,10 +874,11 @@ zpios_do_one_run(struct file *file, zpios_cmd_t *kcmd,
                zpios_print(file, "Invalid flags, DMU_*_ZC incompatible "
                    "with DMU_VERIFY, used for performance analysis "
                    "only, %d\n", -EINVAL);
                return -EINVAL;
                return (-EINVAL);
        }

        /* Opaque data on return contains structs of the following form:
        /*
         * Opaque data on return contains structs of the following form:
         *
         * zpios_stat_t stats[];
         * stats[0] = run_args->stats;
@ -856,18 +887,18 @@ zpios_do_one_run(struct file *file, zpios_cmd_t *kcmd,
         *
         * Where N is the number of threads, and M is the number of regions.
         */
        size = (sizeof(zpios_stats_t) +
            (kcmd->cmd_thread_count * sizeof(zpios_stats_t)) +
            (kcmd->cmd_region_count * sizeof(zpios_stats_t)));
        size = (sizeof (zpios_stats_t) +
            (kcmd->cmd_thread_count * sizeof (zpios_stats_t)) +
            (kcmd->cmd_region_count * sizeof (zpios_stats_t)));
        if (data_size < size) {
                zpios_print(file, "Invalid size, command data buffer "
                    "size too small, (%d < %d)\n", data_size, size);
                return -ENOSPC;
                return (-ENOSPC);
        }

        rc = zpios_setup_run(&run_args, kcmd, file);
        if (rc)
                return rc;
                return (rc);

        rc = zpios_threads_run(run_args);
        zpios_remove_objset(run_args);
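The comment and size computation above describe the flat result buffer returned to user space: one run-wide record, followed by one record per thread, followed by one record per region. The userspace sketch below illustrates the same layout arithmetic; the struct, field names, and counts are invented stand-ins for zpios_stats_t and the real command fields.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for zpios_stats_t */
typedef struct example_stats {
        unsigned long long rd_bytes;
        unsigned long long wr_bytes;
} example_stats_t;

int
main(void)
{
        int threads = 4, regions = 16;
        size_t size = sizeof (example_stats_t) * (1 + threads + regions);
        example_stats_t *stats = calloc(1 + threads + regions,
            sizeof (example_stats_t));

        if (stats == NULL)
                return (1);

        /*
         * stats[0]                    - run-wide totals
         * stats[1] .. stats[threads]  - per-thread results
         * stats[threads + 1] .. stats[threads + regions] - per-region results
         */
        printf("buffer size = %zu bytes\n", size);
        printf("thread 2 -> slot %d, region 5 -> slot %d\n",
            1 + 2, 1 + threads + 5);

        free(stats);
        return (0);
}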
@ -889,9 +920,9 @@ zpios_do_one_run(struct file *file, zpios_cmd_t *kcmd,
cleanup:
        zpios_cleanup_run(run_args);

        (void)zpios_upcall(kcmd->cmd_post, PHASE_POST_RUN, run_args, 0);
        (void) zpios_upcall(kcmd->cmd_post, PHASE_POST_RUN, run_args, 0);

        return rc;
        return (rc);
}

static int
@ -901,24 +932,25 @@ zpios_open(struct inode *inode, struct file *file)
        zpios_info_t *info;

        if (minor >= ZPIOS_MINORS)
                return -ENXIO;
                return (-ENXIO);

        info = (zpios_info_t *)kmem_alloc(sizeof(*info), KM_SLEEP);
        info = (zpios_info_t *)kmem_alloc(sizeof (*info), KM_SLEEP);
        if (info == NULL)
                return -ENOMEM;
                return (-ENOMEM);

        spin_lock_init(&info->info_lock);
        info->info_size = ZPIOS_INFO_BUFFER_SIZE;
        info->info_buffer = (char *)vmem_alloc(ZPIOS_INFO_BUFFER_SIZE,KM_SLEEP);
        info->info_buffer =
            (char *) vmem_alloc(ZPIOS_INFO_BUFFER_SIZE, KM_SLEEP);
        if (info->info_buffer == NULL) {
                kmem_free(info, sizeof(*info));
                return -ENOMEM;
                kmem_free(info, sizeof (*info));
                return (-ENOMEM);
        }

        info->info_head = info->info_buffer;
        file->private_data = (void *)info;

        return 0;
        return (0);
}

static int
@ -928,15 +960,15 @@ zpios_release(struct inode *inode, struct file *file)
        zpios_info_t *info = (zpios_info_t *)file->private_data;

        if (minor >= ZPIOS_MINORS)
                return -ENXIO;
                return (-ENXIO);

        ASSERT(info);
        ASSERT(info->info_buffer);

        vmem_free(info->info_buffer, ZPIOS_INFO_BUFFER_SIZE);
        kmem_free(info, sizeof(*info));
        kmem_free(info, sizeof (*info));

        return 0;
        return (0);
}

static int
@ -952,7 +984,7 @@ zpios_buffer_clear(struct file *file, zpios_cfg_t *kcfg, unsigned long arg)
        info->info_head = info->info_buffer;
        spin_unlock(&info->info_lock);

        return 0;
        return (0);
}

static int
@ -987,12 +1019,13 @@ zpios_buffer_size(struct file *file, zpios_cfg_t *kcfg, unsigned long arg)

        kcfg->cfg_rc1 = info->info_size;

        if (copy_to_user((struct zpios_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
        if (copy_to_user((struct zpios_cfg_t __user *)arg,
            kcfg, sizeof (*kcfg)))
                rc = -EFAULT;
out:
        spin_unlock(&info->info_lock);

        return rc;
        return (rc);
}

static int
@ -1001,24 +1034,26 @@ zpios_ioctl_cfg(struct file *file, unsigned long arg)
        zpios_cfg_t kcfg;
        int rc = 0;

        if (copy_from_user(&kcfg, (zpios_cfg_t *)arg, sizeof(kcfg)))
                return -EFAULT;
        if (copy_from_user(&kcfg, (zpios_cfg_t *)arg, sizeof (kcfg)))
                return (-EFAULT);

        if (kcfg.cfg_magic != ZPIOS_CFG_MAGIC) {
                zpios_print(file, "Bad config magic 0x%x != 0x%x\n",
                    kcfg.cfg_magic, ZPIOS_CFG_MAGIC);
                return -EINVAL;
                return (-EINVAL);
        }

        switch (kcfg.cfg_cmd) {
        case ZPIOS_CFG_BUFFER_CLEAR:
                /* cfg_arg1 - Unused
                /*
                 * cfg_arg1 - Unused
                 * cfg_rc1 - Unused
                 */
                rc = zpios_buffer_clear(file, &kcfg, arg);
                break;
        case ZPIOS_CFG_BUFFER_SIZE:
                /* cfg_arg1 - 0 - query size; >0 resize
                /*
                 * cfg_arg1 - 0 - query size; >0 resize
                 * cfg_rc1 - Set to current buffer size
                 */
                rc = zpios_buffer_size(file, &kcfg, arg);
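One detail worth spelling out for the hunk above: copy_from_user() returns the number of bytes it could not copy, so any non-zero result is treated as -EFAULT, and the magic field is then checked to reject requests built against a mismatched structure definition. A hypothetical reduction of that validation is sketched below; the structure, magic value, and function name are invented.

#include <linux/uaccess.h>
#include <linux/errno.h>

#define EXAMPLE_CFG_MAGIC       0x45584d50      /* hypothetical value */

typedef struct example_cfg {
        unsigned int cfg_magic;                 /* identifies a valid request */
        unsigned int cfg_cmd;
} example_cfg_t;

/* Hypothetical helper mirroring the validation in zpios_ioctl_cfg() */
static int
example_ioctl_cfg(unsigned long arg)
{
        example_cfg_t kcfg;

        /* copy_from_user() returns the number of bytes NOT copied */
        if (copy_from_user(&kcfg, (void __user *)arg, sizeof (kcfg)))
                return (-EFAULT);

        if (kcfg.cfg_magic != EXAMPLE_CFG_MAGIC)
                return (-EINVAL);

        return (0);
}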
@ -1030,7 +1065,7 @@ zpios_ioctl_cfg(struct file *file, unsigned long arg)
                break;
        }

        return rc;
        return (rc);
}

static int
@ -1040,14 +1075,14 @@ zpios_ioctl_cmd(struct file *file, unsigned long arg)
        void *data = NULL;
        int rc = -EINVAL;

        kcmd = kmem_alloc(sizeof(zpios_cmd_t), KM_SLEEP);
        kcmd = kmem_alloc(sizeof (zpios_cmd_t), KM_SLEEP);
        if (kcmd == NULL) {
                zpios_print(file, "Unable to kmem_alloc() %ld byte for "
                    "zpios_cmd_t\n", (long int)sizeof(zpios_cmd_t));
                return -ENOMEM;
                    "zpios_cmd_t\n", (long int)sizeof (zpios_cmd_t));
                return (-ENOMEM);
        }

        rc = copy_from_user(kcmd, (zpios_cfg_t *)arg, sizeof(zpios_cmd_t));
        rc = copy_from_user(kcmd, (zpios_cfg_t *)arg, sizeof (zpios_cmd_t));
        if (rc) {
                zpios_print(file, "Unable to copy command structure "
                    "from user to kernel memory, %d\n", rc);
@ -1057,7 +1092,7 @@ zpios_ioctl_cmd(struct file *file, unsigned long arg)
        if (kcmd->cmd_magic != ZPIOS_CMD_MAGIC) {
                zpios_print(file, "Bad command magic 0x%x != 0x%x\n",
                    kcmd->cmd_magic, ZPIOS_CFG_MAGIC);
                rc = -EINVAL;
                rc = (-EINVAL);
                goto out_cmd;
        }

@ -1100,9 +1135,9 @@ out_data:
                vmem_free(data, kcmd->cmd_data_size);
        }
out_cmd:
        kmem_free(kcmd, sizeof(zpios_cmd_t));
        kmem_free(kcmd, sizeof (zpios_cmd_t));

        return rc;
        return (rc);
}

static long
@ -1113,10 +1148,10 @@ zpios_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

        /* Ignore tty ioctls */
        if ((cmd & 0xffffff00) == ((int)'T') << 8)
                return -ENOTTY;
                return (-ENOTTY);

        if (minor >= ZPIOS_MINORS)
                return -ENXIO;
                return (-ENXIO);

        switch (cmd) {
        case ZPIOS_CFG:
@ -1131,7 +1166,7 @@ zpios_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                break;
        }

        return rc;
        return (rc);
}

#ifdef CONFIG_COMPAT
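The "Ignore tty ioctls" test in the hunk above relies on the ioctl numbering convention: bits 8-15 of a command hold a type byte, and the legacy terminal ioctls carry 'T' there with nothing in the size or direction bits, so masking off the low command byte and comparing against 'T' << 8 filters them out. A small userspace sketch of that decomposition follows; the command value is invented for illustration.

#include <stdio.h>

/* Hypothetical legacy-style tty ioctl number: type 'T', command 0x01 */
#define EXAMPLE_TTY_CMD         (('T' << 8) | 0x01)

int
main(void)
{
        unsigned int cmd = EXAMPLE_TTY_CMD;

        /* Mask off the low command byte; what remains is the type byte */
        if ((cmd & 0xffffff00) == ((int)'T') << 8)
                printf("0x%x looks like a legacy tty ioctl\n", cmd);

        return (0);
}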
@ -1139,11 +1174,12 @@ zpios_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static long
zpios_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        return zpios_unlocked_ioctl(file, cmd, arg);
        return (zpios_unlocked_ioctl(file, cmd, arg));
}
#endif /* CONFIG_COMPAT */

/* I'm not sure why you would want to write in to this buffer from
/*
 * I'm not sure why you would want to write in to this buffer from
 * user space since its principle use is to pass test status info
 * back to the user space, but I don't see any reason to prevent it.
 */
@ -1156,7 +1192,7 @@ zpios_write(struct file *file, const char __user *buf,
        int rc = 0;

        if (minor >= ZPIOS_MINORS)
                return -ENXIO;
                return (-ENXIO);

        ASSERT(info);
        ASSERT(info->info_buffer);
@ -1182,19 +1218,18 @@ zpios_write(struct file *file, const char __user *buf,
        rc = count;
out:
        spin_unlock(&info->info_lock);
        return rc;
        return (rc);
}

static ssize_t
zpios_read(struct file *file, char __user *buf,
    size_t count, loff_t *ppos)
zpios_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned int minor = iminor(file->f_dentry->d_inode);
        zpios_info_t *info = (zpios_info_t *)file->private_data;
        int rc = 0;

        if (minor >= ZPIOS_MINORS)
                return -ENXIO;
                return (-ENXIO);

        ASSERT(info);
        ASSERT(info->info_buffer);
@ -1218,7 +1253,7 @@ zpios_read(struct file *file, char __user *buf,
        rc = count;
out:
        spin_unlock(&info->info_lock);
        return rc;
        return (rc);
}

static loff_t zpios_seek(struct file *file, loff_t offset, int origin)
@ -1228,7 +1263,7 @@ static loff_t zpios_seek(struct file *file, loff_t offset, int origin)
        int rc = -EINVAL;

        if (minor >= ZPIOS_MINORS)
                return -ENXIO;
                return (-ENXIO);

        ASSERT(info);
        ASSERT(info->info_buffer);
@ -1254,7 +1289,7 @@ static loff_t zpios_seek(struct file *file, loff_t offset, int origin)

        spin_unlock(&info->info_lock);

        return rc;
        return (rc);
}

static struct cdev zpios_cdev;
@ -1304,10 +1339,11 @@ zpios_init(void)

        zpios_device = spl_device_create(zpios_class, NULL,
            dev, NULL, ZPIOS_NAME);
        return 0;

        return (0);
error:
        printk(KERN_ERR "ZPIOS: Error registering zpios device, %d\n", rc);
        return rc;
        return (rc);
}

static int
@ -1320,7 +1356,7 @@ zpios_fini(void)
        cdev_del(&zpios_cdev);
        unregister_chrdev_region(dev, ZPIOS_MINORS);

        return 0;
        return (0);
}

spl_module_init(zpios_init);
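zpios_init() and zpios_fini() above follow the standard character-device bring-up and tear-down sequence, minus the SPL device-class wrappers. A generic, hypothetical sketch of that sequence is shown below; every name is invented and no SPL helpers are used.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>

#define EXAMPLE_MINORS  1

static dev_t example_dev;
static struct cdev example_cdev;
static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
};

static int __init
example_init(void)
{
        int rc;

        rc = alloc_chrdev_region(&example_dev, 0, EXAMPLE_MINORS, "example");
        if (rc)
                return (rc);

        cdev_init(&example_cdev, &example_fops);
        rc = cdev_add(&example_cdev, example_dev, EXAMPLE_MINORS);
        if (rc)
                goto error;

        return (0);
error:
        unregister_chrdev_region(example_dev, EXAMPLE_MINORS);
        return (rc);
}

static void __exit
example_fini(void)
{
        cdev_del(&example_cdev);
        unregister_chrdev_region(example_dev, EXAMPLE_MINORS);
}

module_init(example_init);
module_exit(example_fini);
MODULE_LICENSE("GPL");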