Add -c to zpool iostat & status to run command

This patch adds a command (-c) option to zpool status and zpool iostat.  The
-c option allows you to run an arbitrary command on each vdev and display
the first line of output in zpool status/iostat.  The environment vars
VDEV_PATH and VDEV_UPATH are set to the vdev's path and "underlying path"
before running the command.  For device mapper, multipath, or partitioned
vdevs, VDEV_UPATH is the actual underlying /dev/sd* disk.  This can be useful
if the command you're running requires a /dev/sd* device.

The patch also uses /sys/block/<dev>/slaves/ to look up the underlying device
instead of using libdevmapper.  This not only removes the libdevmapper
requirement at build time, but also allows you to resolve device mapper
devices without being root.  This means that VDEV_UPATH gets set correctly
when running zpool status/iostat as an unprivileged user.
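
For illustration, the same lookup can be done by hand from a shell. This is
a minimal sketch with hypothetical device names, mirroring the steps the new
dm_get_underlying_path() below performs:

$ realpath /dev/mapper/mpatha
/dev/dm-0
$ ls /sys/block/dm-0/slaves/
sdc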

Example:

$ zpool status -c 'echo I am $VDEV_PATH, $VDEV_UPATH'

NAME        STATE     READ WRITE CKSUM
mypool      ONLINE       0     0     0
  mirror-0  ONLINE       0     0     0
    mpatha  ONLINE       0     0     0  I am /dev/mapper/mpatha, /dev/sdc
    sdb     ONLINE       0     0     0  I am /dev/sdb1, /dev/sdb
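
A more involved use of VDEV_UPATH, shown in the man page example added
below, is querying SMART data from each vdev's backing disk:

$ zpool iostat -v -c 'smartctl -a $VDEV_UPATH | grep "Current Drive Temperature"'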

Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tony Hutter <hutter2@llnl.gov>
Closes #5368
Tony Hutter, 2016-11-29 13:45:38 -08:00 (committed by Brian Behlendorf)
parent 2f71caf2d9
commit 8720e9e748
16 changed files with 486 additions and 136 deletions


@ -215,9 +215,7 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
if (offline)
return; /* don't intervene if it was taken offline */
#ifdef HAVE_LIBDEVMAPPER
is_dm = zfs_dev_is_dm(path);
#endif
zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
" wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
physpath ? physpath : "NULL", wholedisk, is_dm,


@ -12,8 +12,6 @@
# Linux SCSI enclosure services (ses) driver. The script will do nothing
# if you have no enclosure, or if your enclosure isn't supported.
#
# This script also requires ZFS to be built with libdevmapper support.
#
# Exit codes:
# 0: enclosure led successfully set
# 1: enclosure leds not available


@ -33,6 +33,7 @@
#include <strings.h>
#include <libzfs.h>
#include <sys/zfs_context.h>
#include "zpool_util.h"
@ -316,3 +317,162 @@ for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data)
}
return (for_each_vdev_cb(zhp, nvroot, func, data));
}
/* Thread function run for each vdev */
static void
vdev_run_cmd_thread(void *cb_cmd_data)
{
vdev_cmd_data_t *data = cb_cmd_data;
char *pos = NULL;
FILE *fp;
size_t len = 0;
char cmd[_POSIX_ARG_MAX];
/* Set our VDEV_PATH and VDEV_UPATH env vars and run command */
if (snprintf(cmd, sizeof (cmd), "VDEV_PATH=%s && VDEV_UPATH=%s && %s",
data->path, data->upath ? data->upath : "\"\"", data->cmd) >=
sizeof (cmd)) {
/* Our string was truncated */
return;
}
fp = popen(cmd, "r");
if (fp == NULL)
return;
data->line = NULL;
/* Save the first line of output from the command */
if (getline(&data->line, &len, fp) != -1) {
/* Success. Remove newline from the end, if necessary. */
if ((pos = strchr(data->line, '\n')) != NULL)
*pos = '\0';
} else {
data->line = NULL;
}
pclose(fp);
}
/* For each vdev in the pool run a command */
static int
for_each_vdev_run_cb(zpool_handle_t *zhp, nvlist_t *nv, void *cb_vcdl)
{
vdev_cmd_data_list_t *vcdl = cb_vcdl;
vdev_cmd_data_t *data;
char *path = NULL;
int i;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
return (1);
/* Spares show more than once if they're in use, so skip if exists */
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) == 0) &&
(strcmp(vcdl->data[i].pool, zpool_get_name(zhp)) == 0)) {
/* vdev already exists, skip it */
return (0);
}
}
/*
* Resize our array and add in the new element.
*/
if (!(vcdl->data = realloc(vcdl->data,
sizeof (*vcdl->data) * (vcdl->count + 1))))
return (ENOMEM); /* couldn't realloc */
data = &vcdl->data[vcdl->count];
data->pool = strdup(zpool_get_name(zhp));
data->path = strdup(path);
data->upath = zfs_get_underlying_path(path);
data->cmd = vcdl->cmd;
vcdl->count++;
return (0);
}
/* Get the names and count of the vdevs */
static int
all_pools_for_each_vdev_gather_cb(zpool_handle_t *zhp, void *cb_vcdl)
{
return (for_each_vdev(zhp, for_each_vdev_run_cb, cb_vcdl));
}
/*
* Now that vcdl is populated with our complete list of vdevs, spawn
* off the commands.
*/
static void
all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t *vcdl)
{
taskq_t *t;
int i;
/* 5 * boot_ncpus selfishly chosen since it works best on LLNL's HW */
int max_threads = 5 * boot_ncpus;
/*
* Under Linux we use a taskq to parallelize running a command
* on each vdev. It is therefore necessary to initialize this
* functionality for the duration of the threads.
*/
thread_init();
t = taskq_create("z_pool_cmd", max_threads, defclsyspri, max_threads,
INT_MAX, 0);
if (t == NULL)
return;
/* Spawn off the command for each vdev */
for (i = 0; i < vcdl->count; i++) {
(void) taskq_dispatch(t, vdev_run_cmd_thread,
(void *) &vcdl->data[i], TQ_SLEEP);
}
/* Wait for threads to finish */
taskq_wait(t);
taskq_destroy(t);
thread_fini();
}
/*
* Run command 'cmd' on all vdevs in all pools. Saves the first line of output
* from the command in vcdl->data[].line for all vdevs.
*
* Returns a vdev_cmd_data_list_t that must be freed with
* free_vdev_cmd_data_list();
*/
vdev_cmd_data_list_t *
all_pools_for_each_vdev_run(int argc, char **argv, char *cmd)
{
vdev_cmd_data_list_t *vcdl;
vcdl = safe_malloc(sizeof (*vcdl));
vcdl->cmd = cmd;
/* Gather our list of all vdevs in all pools */
for_each_pool(argc, argv, B_TRUE, NULL,
all_pools_for_each_vdev_gather_cb, vcdl);
/* Run command on all vdevs in all pools */
all_pools_for_each_vdev_run_vcdl(vcdl);
return (vcdl);
}
/*
* Free the vdev_cmd_data_list_t created by all_pools_for_each_vdev_run()
*/
void
free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl)
{
int i;
for (i = 0; i < vcdl->count; i++) {
free(vcdl->data[i].path);
free(vcdl->data[i].pool);
free(vcdl->data[i].upath);
free(vcdl->data[i].line);
}
free(vcdl->data);
free(vcdl);
}


@ -1510,8 +1510,23 @@ typedef struct status_cbdata {
boolean_t cb_first;
boolean_t cb_dedup_stats;
boolean_t cb_print_status;
vdev_cmd_data_list_t *vcdl;
} status_cbdata_t;
/* Print output line for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
{
int i;
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) == 0) &&
(strcmp(vcdl->data[i].pool, pool) == 0)) {
printf("%s", vcdl->data[i].line);
break;
}
}
}
/*
* Print out configuration state as requested by status_callback.
*/
@ -1528,6 +1543,7 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
uint64_t notpresent;
spare_cbdata_t spare_cb;
char *state;
char *path = NULL;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
@ -1560,7 +1576,6 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&notpresent) == 0) {
char *path;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
(void) printf(" was %s", path);
} else if (vs->vs_aux != 0) {
@ -1641,6 +1656,13 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
"resilvering" : "repairing");
}
if (cb->vcdl != NULL) {
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
(void) printf("\n");
for (c = 0; c < children; c++) {
@ -2586,6 +2608,7 @@ typedef struct iostat_cbdata {
boolean_t cb_literal;
boolean_t cb_scripted;
zpool_list_t *cb_list;
vdev_cmd_data_list_t *vcdl;
} iostat_cbdata_t;
/* iostat labels */
@ -3393,6 +3416,18 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
print_iostat_histos(cb, oldnv, newnv, scale, name);
}
if (cb->vcdl != NULL) {
char *path;
if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
&path) == 0) {
if (!(cb->cb_flags & IOS_ANYHISTO_M))
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
if (cb->cb_flags & IOS_ANYHISTO_M)
printf("\n");
}
}
if (!(cb->cb_flags & IOS_ANYHISTO_M))
printf("\n");
@ -3924,10 +3959,11 @@ fsleep(float sec) {
/*
* zpool iostat [-ghHLpPvy] [[-lq]|[-r|-w]] [-n name] [-T d|u]
* zpool iostat [-c CMD] [-ghHLpPvy] [[-lq]|[-r|-w]] [-n name] [-T d|u]
* [[ pool ...]|[pool vdev ...]|[vdev ...]]
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
@ -3965,6 +4001,7 @@ zpool_do_iostat(int argc, char **argv)
boolean_t follow_links = B_FALSE;
boolean_t full_name = B_FALSE;
iostat_cbdata_t cb = { 0 };
char *cmd = NULL;
/* Used for printing error message */
const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
@ -3973,8 +4010,11 @@ zpool_do_iostat(int argc, char **argv)
uint64_t unsupported_flags;
/* check options */
while ((c = getopt(argc, argv, "gLPT:vyhplqrwH")) != -1) {
while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwH")) != -1) {
switch (c) {
case 'c':
cmd = optarg;
break;
case 'g':
guid = B_TRUE;
break;
@ -4167,7 +4207,6 @@ zpool_do_iostat(int argc, char **argv)
return (1);
}
for (;;) {
if ((npools = pool_list_count(list)) == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
@ -4217,8 +4256,15 @@ zpool_do_iostat(int argc, char **argv)
continue;
}
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc,
argv, cmd);
pool_list_iter(list, B_FALSE, print_iostat, &cb);
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
/*
* If there's more than one pool, and we're not in
* verbose mode (which prints a separator for us),
@ -6016,8 +6062,9 @@ status_callback(zpool_handle_t *zhp, void *data)
}
/*
* zpool status [-gLPvx] [-T d|u] [pool] ... [interval [count]]
* zpool status [-c CMD] [-gLPvx] [-T d|u] [pool] ... [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
@ -6036,10 +6083,14 @@ zpool_do_status(int argc, char **argv)
float interval = 0;
unsigned long count = 0;
status_cbdata_t cb = { 0 };
char *cmd = NULL;
/* check options */
while ((c = getopt(argc, argv, "gLPvxDT:")) != -1) {
while ((c = getopt(argc, argv, "c:gLPvxDT:")) != -1) {
switch (c) {
case 'c':
cmd = optarg;
break;
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
@ -6083,9 +6134,15 @@ zpool_do_status(int argc, char **argv)
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd);
ret = for_each_pool(argc, argv, B_TRUE, NULL,
status_callback, &cb);
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
if (argc == 0 && cb.cb_count == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)


@ -72,6 +72,27 @@ void pool_list_remove(zpool_list_t *, zpool_handle_t *);
libzfs_handle_t *g_zfs;
typedef struct vdev_cmd_data
{
char *line; /* cmd output */
char *path; /* vdev path */
char *upath; /* vdev underlying path */
char *pool; /* Pool name */
char *cmd; /* backpointer to cmd */
} vdev_cmd_data_t;
typedef struct vdev_cmd_data_list
{
char *cmd; /* Command to run */
unsigned int count; /* Number of vdev_cmd_data items (vdevs) */
vdev_cmd_data_t *data; /* Array of vdevs */
} vdev_cmd_data_list_t;
vdev_cmd_data_list_t * all_pools_for_each_vdev_run(int argc, char **argv,
char *cmd);
void free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl);
#ifdef __cplusplus
}
#endif


@ -1,15 +0,0 @@
dnl #
dnl # Check for libdevmapper. libdevmapper is optional for building, but
dnl # required for auto-online/auto-replace functionality for DM/multipath
dnl # disks.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER_LIBDEVMAPPER], [
AC_CHECK_HEADER([libdevmapper.h], [
AC_SUBST([LIBDEVMAPPER], ["-ldevmapper"])
AC_DEFINE([HAVE_LIBDEVMAPPER], 1, [Define if you have libdevmapper])
user_libdevmapper=yes
], [
user_libdevmapper=no
])
])


@ -12,7 +12,6 @@ AC_DEFUN([ZFS_AC_CONFIG_USER], [
ZFS_AC_CONFIG_USER_LIBTIRPC
ZFS_AC_CONFIG_USER_LIBBLKID
ZFS_AC_CONFIG_USER_LIBATTR
ZFS_AC_CONFIG_USER_LIBDEVMAPPER
ZFS_AC_CONFIG_USER_LIBUDEV
ZFS_AC_CONFIG_USER_FRAME_LARGER_THAN
ZFS_AC_CONFIG_USER_RUNSTATEDIR


@ -304,8 +304,3 @@ AC_CONFIG_FILES([
AC_OUTPUT
AS_IF([test "x$user_libdevmapper" != xyes && test "$ZFS_CONFIG" != kernel ], [
AC_MSG_WARN([Building without libdevmapper. Auto-replace, auto-online, \
and statechange-led.sh may not work correctly with device mapper vdevs.])
])


@ -838,6 +838,7 @@ extern int zpool_fru_set(zpool_handle_t *, uint64_t, const char *);
extern boolean_t is_mpath_whole_disk(const char *);
extern void update_vdev_config_dev_strs(nvlist_t *);
extern char *zfs_strip_partition(char *);
extern char *zfs_strip_partition_path(char *);
#ifdef HAVE_LIBUDEV
struct udev_device;


@ -35,7 +35,7 @@ libzfs_la_LIBADD = \
$(top_builddir)/lib/libnvpair/libnvpair.la \
$(top_builddir)/lib/libzpool/libzpool.la
libzfs_la_LIBADD += -lm $(LIBBLKID) $(LIBUDEV) $(LIBDEVMAPPER)
libzfs_la_LIBADD += -lm $(LIBBLKID) $(LIBUDEV)
libzfs_la_LDFLAGS = -version-info 2:0:0
EXTRA_DIST = $(libzfs_pc_DATA) $(USER_C)


@ -41,9 +41,6 @@
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>
#if HAVE_LIBDEVMAPPER
#include <libdevmapper.h>
#endif
#include "zfs_namecheck.h"
#include "zfs_prop.h"
@ -3432,6 +3429,43 @@ zfs_strip_partition(char *path)
return (tmp);
}
/*
* Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
*
* path: /dev/sda1
* returns: /dev/sda
*
* Returned string must be freed.
*/
char *
zfs_strip_partition_path(char *path)
{
char *newpath = strdup(path);
char *sd_offset;
char *new_sd;
if (!newpath)
return (NULL);
/* Point to "sda1" part of "/dev/sda1" */
sd_offset = strrchr(newpath, '/') + 1;
/* Get our new name "sda" */
new_sd = zfs_strip_partition(sd_offset);
if (!new_sd) {
free(newpath);
return (NULL);
}
/* Paste the "sda" where "sda1" was */
strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
/* Free temporary "sda" */
free(new_sd);
return (newpath);
}
#define PATH_BUF_LEN 64
/*
@ -4318,112 +4352,69 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
return (0);
}
#if HAVE_LIBDEVMAPPER
static void libdevmapper_dummy_log(int level, const char *file, int line,
int dm_errno_or_class, const char *f, ...) {}
/* Disable libdevmapper error logging */
static void disable_libdevmapper_errors(void) {
dm_log_with_errno_init(libdevmapper_dummy_log);
}
/* Enable libdevmapper error logging */
static void enable_libdevmapper_errors(void) {
dm_log_with_errno_init(NULL);
}
#endif
/*
* Allocate and return the underlying device name for a device mapper device.
* If a device mapper device maps to multiple devices, return the first device.
*
* For example, dm_name = "/dev/dm-0" could return "/dev/sda"
*
* dm_name should include the "/dev[/mapper]" prefix.
* For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
* DM device (like /dev/disk/by-vdev/A0) are also allowed.
*
* Returns device name, or NULL on error or no match. If dm_name is not a DM
* device then return NULL.
*
* NOTE: The returned name string must be *freed*.
*/
static char * dm_get_underlying_path(char *dm_name)
char *
dm_get_underlying_path(char *dm_name)
{
char *name = NULL;
#if HAVE_LIBDEVMAPPER
char *tmp;
struct dm_task *dmt = NULL;
struct dm_tree *dt = NULL;
struct dm_tree_node *root, *child;
void *handle = NULL;
struct dm_info info;
const struct dm_info *child_info;
DIR *dp = NULL;
struct dirent *ep;
char *realp;
char *tmp = NULL;
char *path = NULL;
char *dev_str;
int size;
if (dm_name == NULL)
return (NULL);
/* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
realp = realpath(dm_name, NULL);
if (realp == NULL)
return (NULL);
/*
* Disable libdevmapper errors. It's entirely possible user is not
* running devmapper, or that dm_name is not a devmapper device.
* That's totally ok, we will just harmlessly and silently return NULL.
* If they preface 'dev' with a path (like "/dev") then strip it off.
* We just want the 'dm-N' part.
*/
disable_libdevmapper_errors();
tmp = strrchr(realp, '/');
if (tmp != NULL)
dev_str = tmp + 1; /* +1 since we want the chr after '/' */
else
dev_str = tmp;
/*
* libdevmapper tutorial
*
* libdevmapper is basically a fancy wrapper for its ioctls. You
* create a "task", fill in the needed info to the task (fill in the
* ioctl fields), then run the task (call the ioctl).
*
* First we need the major/minor number for our DM device.
*/
if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
if (size == -1 || !tmp)
goto end;
/* Lookup the name in libdevmapper */
if (!dm_task_set_name(dmt, dm_name)) {
enable_libdevmapper_errors();
dp = opendir(tmp);
if (dp == NULL)
goto end;
/* Return first sd* entry in /sys/block/dm-N/slaves/ */
while ((ep = readdir(dp))) {
if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
size = asprintf(&path, "/dev/%s", ep->d_name);
break;
}
}
if (!dm_task_run(dmt))
goto end;
/* Get DM device's major/minor */
if (!dm_task_get_info(dmt, &info))
goto end;
/* We have major/minor number. Lookup the dm device's children */
if (!(dt = dm_tree_create()))
goto end;
/* We add the device into the tree and its children get populated */
if (!dm_tree_add_dev(dt, info.major, info.minor))
goto end;
if (!(root = dm_tree_find_node(dt, 0, 0)))
goto end;
if (!(child = dm_tree_next_child(&handle, root, 1)))
goto end;
/* Get child's major/minor numbers */
if (!(child_info = dm_tree_node_get_info(child)))
goto end;
if ((asprintf(&tmp, "/dev/block/%d:%d", child_info->major,
child_info->minor) == -1) || tmp == NULL)
goto end;
/* Further translate /dev/block/ name into the normal name */
name = realpath(tmp, NULL);
free(tmp);
end:
if (dmt)
dm_task_destroy(dmt);
if (dt)
dm_tree_free(dt);
enable_libdevmapper_errors();
#endif /* HAVE_LIBDEVMAPPER */
return (name);
if (dp != NULL)
closedir(dp);
free(tmp);
free(realp);
return (path);
}
/*
@ -4436,7 +4427,7 @@ zfs_dev_is_dm(char *dev_name)
char *tmp;
tmp = dm_get_underlying_path(dev_name);
if (!tmp)
if (tmp == NULL)
return (0);
free(tmp);
@ -4489,17 +4480,17 @@ zfs_get_underlying_path(char *dev_name)
char *name = NULL;
char *tmp;
if (!dev_name)
if (dev_name == NULL)
return (NULL);
tmp = dm_get_underlying_path(dev_name);
/* dev_name not a DM device, so just un-symlinkize it */
if (!tmp)
if (tmp == NULL)
tmp = realpath(dev_name, NULL);
if (tmp) {
name = zfs_strip_partition(tmp);
if (tmp != NULL) {
name = zfs_strip_partition_path(tmp);
free(tmp);
}
@ -4532,12 +4523,12 @@ zfs_get_enclosure_sysfs_path(char *dev_name)
size_t size;
int tmpsize;
if (!dev_name)
if (dev_name == NULL)
return (NULL);
/* If they preface 'dev' with a path (like "/dev") then strip it off */
tmp1 = strrchr(dev_name, '/');
if (tmp1)
if (tmp1 != NULL)
dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
@ -4558,7 +4549,7 @@ zfs_get_enclosure_sysfs_path(char *dev_name)
*/
while ((ep = readdir(dp))) {
/* Ignore everything that's not our enclosure_device link */
if (!strstr(ep->d_name, "enclosure_device"))
if (strstr(ep->d_name, "enclosure_device") == NULL)
continue;
if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
@ -4605,7 +4596,7 @@ end:
free(tmp2);
free(tmp1);
if (dp)
if (dp != NULL)
closedir(dp);
return (path);


@ -96,7 +96,7 @@ zpool \- configures ZFS storage pools
.LP
.nf
\fB\fBzpool iostat\fR [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-ghHLpPvy\fR] [\fB-lq\fR]|[\fB-r\fR|-\fBw\fR]]
\fB\fBzpool iostat\fR [\fB-c\fR \fBCMD\fR] [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-ghHLpPvy\fR] [\fB-lq\fR]|[\fB-r\fR|-\fBw\fR]]
[[\fIpool\fR ...]|[\fIpool vdev\fR ...]|[\fIvdev\fR ...]] [\fIinterval\fR[\fIcount\fR]]\fR
.fi
@ -159,7 +159,7 @@ zpool \- configures ZFS storage pools
.LP
.nf
\fBzpool status\fR [\fB-gLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
\fBzpool status\fR [\fB-c\fR \fBCMD\fR] [\fB-gLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
.fi
.LP
@ -1523,7 +1523,7 @@ Scan using the default search path, the libblkid cache will not be consulted. A
.sp
.ne 2
.na
\fB\fBzpool iostat\fR [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-ghHLpPvy\fR] [[\fB-lq\fR]|[\fB-r\fR|\fB-w\fR]] [[\fIpool\fR ...]|[\fIpool vdev\fR ...]|[\fIvdev\fR ...]] [\fIinterval\fR[\fIcount\fR]]\fR
\fB\fBzpool iostat\fR [\fB-c\fR \fBCMD\fR] [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-ghHLpPvy\fR] [[\fB-lq\fR]|[\fB-r\fR|\fB-w\fR]] [[\fIpool\fR ...]|[\fIpool vdev\fR ...]|[\fIvdev\fR ...]] [\fIinterval\fR[\fIcount\fR]]\fR
.ad
.sp .6
@ -1539,6 +1539,23 @@ statistics since boot regardless of whether \fIinterval\fR and \fIcount\fR
are passed. However, this behavior can be suppressed with the -y flag. Also
note that the units of 'K', 'M', 'G'... that are printed in the report are in
base 1024. To get the raw values, use the \fB-p\fR flag.
.sp
.ne 2
.na
\fB\fB-c\fR \fBCMD\fR
.ad
.RS 12n
Run a command on each vdev and include first line of output
.sp
The \fB-c\fR option allows you to run an arbitrary command on each vdev and
display the first line of output in zpool iostat. The environment vars
\fBVDEV_PATH\fR and \fBVDEV_UPATH\fR are set to the vdev's path and "underlying
path" before running the command. For device mapper, multipath, or partitioned
vdevs, \fBVDEV_UPATH\fR is the actual underlying /dev/sd* disk. This can be
useful if the command you're running requires a /dev/sd* device. Commands run
in parallel for each vdev for performance.
.RE
.sp
.ne 2
.na
@ -2082,7 +2099,7 @@ Sets the specified property for \fInewpool\fR. See the “Properties” section
.sp
.ne 2
.na
\fBzpool status\fR [\fB-gLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
\fBzpool status\fR [\fB-c\fR \fBCMD\fR] [\fB-gLPvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
.ad
.sp .6
.RS 4n
@ -2090,6 +2107,23 @@ Displays the detailed health status for the given pools. If no \fIpool\fR is spe
.sp
If a scrub or resilver is in progress, this command reports the percentage done and the estimated time to completion. Both of these are only approximate, because the amount of data in the pool and the other workloads on the system can change.
.sp
.ne 2
.na
\fB\fB-c\fR \fBCMD\fR
.ad
.RS 12n
Run a command on each vdev and include first line of output
.sp
The \fB-c\fR option allows you to run an arbitrary command on each vdev and
display the first line of output in zpool status. The environment vars
\fBVDEV_PATH\fR and \fBVDEV_UPATH\fR are set to the vdev's path and "underlying
path" before running the command. For device mapper, multipath, or partitioned
vdevs, \fBVDEV_UPATH\fR is the actual underlying /dev/sd* disk. This can be
useful if the command you're running requires a /dev/sd* device. Commands run
in parallel for each vdev for performance.
.RE
.sp
.ne 2
.na
@ -2519,6 +2553,40 @@ data 23.9G 14.6G 9.30G 48% - 61% 1.00x ONLINE -
c1t3d0 - - - - -
.fi
.in -2
.sp
.LP
\fBExample 16 \fRRunning commands in zpool status and zpool iostat with -c
.sp
.LP
Some examples of using the command (-c) option with zpool status and zpool
iostat:
.sp
.in +2
.nf
# \fBzpool status -c \[aq]echo I am $VDEV_PATH, $VDEV_UPATH\[aq]\fR
NAME STATE READ WRITE CKSUM
mypool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
mpatha ONLINE 0 0 0 I am /dev/mapper/mpatha, /dev/sdc
sdb ONLINE 0 0 0 I am /dev/sdb1, /dev/sdb
.fi
.in -2
.sp
.in +2
.nf
# \fBzpool iostat -v -c \[aq]smartctl -a $VDEV_UPATH | grep "Current Drive Temperature"\[aq]\fR
mypool 997M 7.25T 0 0 105K 106K
mirror 997M 7.25T 0 0 105K 106K
B0 - - 0 0 17.4K 15.2K Current Drive Temperature: 25 C
B1 - - 0 0 17.4K 15.2K Current Drive Temperature: 24 C
B2 - - 0 0 17.5K 15.2K Current Drive Temperature: 24 C
B3 - - 0 0 0 15.1K Current Drive Temperature: 24 C
logs - - - - - -
B8 0 7.25T 0 0 1.14K 20.2K Current Drive Temperature: 23 C
.fi
.in -2
.SH EXIT STATUS
.sp


@ -74,7 +74,6 @@ Requires: libzpool2 = %{version}
Requires: libnvpair1 = %{version}
Requires: libuutil1 = %{version}
Requires: libzfs2 = %{version}
Requires: device-mapper
Requires: %{name}-kmod = %{version}
Provides: %{name}-kmod-common = %{version}
@ -86,7 +85,6 @@ Conflicts: zfs-fuse
BuildRequires: zlib-devel
BuildRequires: libuuid-devel
BuildRequires: libblkid-devel
BuildRequires: device-mapper-devel
BuildRequires: libudev-devel
BuildRequires: libattr-devel
%endif


@ -69,6 +69,21 @@ check_pool_status
log_must eval "$ZPOOL status -v $TESTPOOL > /tmp/pool-status.$$"
check_pool_status
# Make sure -c option works, and that VDEV_PATH and VDEV_UPATH get set.
#
# grep for '^\s+/' to just get the vdevs (not pools). All vdevs will start with
# a '/' when we specify the path (-P) flag. We check for "{}" to see if one
# of the VDEV variables isn't set.
C1=$($ZPOOL status -P | $GREP -E '^\s+/' | $WC -l)
C2=$($ZPOOL status -P -c 'echo vdev_test{$VDEV_PATH}{$VDEV_UPATH}' | \
$GREP -E '^\s+/' | $GREP -v '{}' | $WC -l)
if [ "$C1" != "$C2" ] ; then
log_fail "zpool status -c option failed. Expected $C1 vdevs, got $C2"
else
log_pass "zpool status -c option passed. Expected $C1 vdevs, got $C2"
fi
# $TESTPOOL.virt has an offline device, so -x will show it
log_must eval "$ZPOOL status -x $TESTPOOL.virt > /tmp/pool-status.$$"
check_pool_status


@ -5,4 +5,5 @@ dist_pkgdata_SCRIPTS = \
zpool_iostat_001_neg.ksh \
zpool_iostat_002_pos.ksh \
zpool_iostat_003_neg.ksh \
zpool_iostat_004_pos.ksh
zpool_iostat_004_pos.ksh \
zpool_iostat_005_pos.ksh


@ -0,0 +1,63 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013 by Delphix. All rights reserved.
#
#
# Copyright (c) 2016 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/libtest.shlib
verify_runnable "both"
typeset testpool
if is_global_zone ; then
testpool=$TESTPOOL
else
testpool=${TESTPOOL%%/*}
fi
#
# DESCRIPTION:
# Verify 'zpool iostat -c CMD' works, and that VDEV_PATH and VDEV_UPATH get set.
#
# STRATEGY:
# grep for '^\s+/' to just get the vdevs (not pools). All vdevs will start with
# a '/' when we specify the path (-P) flag. We check for "{}" to see if one
# of the VDEV variables isn't set.
#
C1=$($ZPOOL iostat -Pv | $GREP -E '^\s+/' | $WC -l)
C2=$($ZPOOL iostat -Pv -c 'echo vdev_test{$VDEV_PATH}{$VDEV_UPATH}' | $GREP -E '^\s+/' | $GREP -v '{}' | $WC -l)
if [ "$C1" != "$C2" ] ; then
log_fail "zpool iostat -c failed, expected $C1 vdevs, got $C2"
else
log_pass "zpool iostat -c passed, expected $C1 vdevs, got $C2"
fi