mirror of
https://git.proxmox.com/git/mirror_zfs.git
synced 2026-01-25 10:12:13 +03:00
This patch adds a new top-level vdev type called dRAID, which stands
for Distributed parity RAID. This pool configuration allows all dRAID
vdevs to participate when rebuilding to a distributed hot spare device.
This can substantially reduce the total time required to restore full
parity to a pool with a failed device.
A dRAID pool can be created using the new top-level `draid` type.
Like `raidz`, the desired redundancy is specified after the type:
`draid[1,2,3]`. No additional information is required to create the
pool and reasonable default values will be chosen based on the number
of child vdevs in the dRAID vdev.
zpool create <pool> draid[1,2,3] <vdevs...>
Unlike raidz, additional optional dRAID configuration values can be
provided as part of the draid type as colon separated values. This
allows administrators to fully specify a layout for either performance
or capacity reasons. The supported options include:
zpool create <pool> \
draid[<parity>][:<data>d][:<children>c][:<spares>s] \
<vdevs...>
- draid[parity] - Parity level (default 1)
- draid[:<data>d] - Data devices per group (default 8)
- draid[:<children>c] - Expected number of child vdevs
- draid[:<spares>s] - Distributed hot spares (default 0)
Abbreviated example `zpool status` output for a 68 disk dRAID pool
with two distributed spares using special allocation classes.
```
pool: tank
state: ONLINE
config:
NAME STATE READ WRITE CKSUM
slag7 ONLINE 0 0 0
draid2:8d:68c:2s-0 ONLINE 0 0 0
L0 ONLINE 0 0 0
L1 ONLINE 0 0 0
...
U25 ONLINE 0 0 0
U26 ONLINE 0 0 0
spare-53 ONLINE 0 0 0
U27 ONLINE 0 0 0
draid2-0-0 ONLINE 0 0 0
U28 ONLINE 0 0 0
U29 ONLINE 0 0 0
...
U42 ONLINE 0 0 0
U43 ONLINE 0 0 0
special
mirror-1 ONLINE 0 0 0
L5 ONLINE 0 0 0
U5 ONLINE 0 0 0
mirror-2 ONLINE 0 0 0
L6 ONLINE 0 0 0
U6 ONLINE 0 0 0
spares
draid2-0-0 INUSE currently in use
draid2-0-1 AVAIL
```
When adding test coverage for the new dRAID vdev type the following
options were added to the ztest command. These options are leveraged
by zloop.sh to test a wide range of dRAID configurations.
-K draid|raidz|random - kind of RAID to test
-D <value> - dRAID data drives per group
-S <value> - dRAID distributed hot spares
-R <value> - RAID parity (raidz or dRAID)
The zpool_create, zpool_import, redundancy, replacement and fault
test groups have all been updated to provide test coverage for the
dRAID feature.
Co-authored-by: Isaac Huang <he.huang@intel.com>
Co-authored-by: Mark Maybee <mmaybee@cray.com>
Co-authored-by: Don Brady <don.brady@delphix.com>
Co-authored-by: Matthew Ahrens <mahrens@delphix.com>
Co-authored-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Mark Maybee <mmaybee@cray.com>
Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #10102
239 lines
4.1 KiB
Makefile
include $(top_srcdir)/config/Rules.am

# Search the in-tree module and library directories for the shared
# sources listed in USER_C/KERNEL_C/LUA_C below, so the kernel code can
# be compiled into the userspace libzpool library.
VPATH = \
	$(top_srcdir)/module/zfs \
	$(top_srcdir)/module/zcommon \
	$(top_srcdir)/module/lua \
	$(top_srcdir)/module/os/linux/zfs \
	$(top_srcdir)/lib/libzpool
# Pick up the OS-specific ZFS headers for the platform being built.
if BUILD_FREEBSD
DEFAULT_INCLUDES += -I$(top_srcdir)/include/os/freebsd/zfs
endif

if BUILD_LINUX
DEFAULT_INCLUDES += -I$(top_srcdir)/include/os/linux/zfs
endif
# Unconditionally enable debugging for libzpool
AM_CPPFLAGS += -DDEBUG -UNDEBUG -DZFS_DEBUG

# Suppress unused but set variable warnings often due to ASSERTs
AM_CFLAGS += $(NO_UNUSED_BUT_SET_VARIABLE)

# Included kernel code generates warnings for large stack frames
AM_CFLAGS += $(FRAME_LARGER_THAN)

AM_CFLAGS += $(ZLIB_CFLAGS)

# Compile the shared kernel sources in userspace (libzpool) mode.
AM_CFLAGS += -DLIB_ZPOOL_BUILD
lib_LTLIBRARIES = libzpool.la

# Userspace-only shims that emulate kernel services (threads, task
# queues, etc.) for the kernel sources compiled into libzpool.
USER_C = \
	kernel.c \
	taskq.c \
	util.c
# Kernel sources (found via VPATH above) compiled into libzpool so the
# full pool machinery is usable from userspace (ztest, zdb, ...).
KERNEL_C = \
	zfeature_common.c \
	zfs_comutil.c \
	zfs_deleg.c \
	zfs_fletcher.c \
	zfs_fletcher_aarch64_neon.c \
	zfs_fletcher_avx512.c \
	zfs_fletcher_intel.c \
	zfs_fletcher_sse.c \
	zfs_fletcher_superscalar.c \
	zfs_fletcher_superscalar4.c \
	zfs_namecheck.c \
	zfs_prop.c \
	zfs_uio.c \
	zpool_prop.c \
	zprop_common.c \
	abd.c \
	abd_os.c \
	aggsum.c \
	arc.c \
	arc_os.c \
	blkptr.c \
	bplist.c \
	bpobj.c \
	bptree.c \
	btree.c \
	bqueue.c \
	cityhash.c \
	dbuf.c \
	dbuf_stats.c \
	ddt.c \
	ddt_zap.c \
	dmu.c \
	dmu_diff.c \
	dmu_object.c \
	dmu_objset.c \
	dmu_recv.c \
	dmu_redact.c \
	dmu_send.c \
	dmu_traverse.c \
	dmu_tx.c \
	dmu_zfetch.c \
	dnode.c \
	dnode_sync.c \
	dsl_bookmark.c \
	dsl_dataset.c \
	dsl_deadlist.c \
	dsl_deleg.c \
	dsl_dir.c \
	dsl_crypt.c \
	dsl_pool.c \
	dsl_prop.c \
	dsl_scan.c \
	dsl_synctask.c \
	dsl_destroy.c \
	dsl_userhold.c \
	edonr_zfs.c \
	hkdf.c \
	fm.c \
	gzip.c \
	lzjb.c \
	lz4.c \
	metaslab.c \
	mmp.c \
	multilist.c \
	objlist.c \
	pathname.c \
	range_tree.c \
	refcount.c \
	rrwlock.c \
	sa.c \
	sha256.c \
	skein_zfs.c \
	spa.c \
	spa_boot.c \
	spa_checkpoint.c \
	spa_config.c \
	spa_errlog.c \
	spa_history.c \
	spa_log_spacemap.c \
	spa_misc.c \
	spa_stats.c \
	space_map.c \
	space_reftree.c \
	txg.c \
	trace.c \
	uberblock.c \
	unique.c \
	vdev.c \
	vdev_cache.c \
	vdev_draid.c \
	vdev_draid_rand.c \
	vdev_file.c \
	vdev_indirect_births.c \
	vdev_indirect.c \
	vdev_indirect_mapping.c \
	vdev_initialize.c \
	vdev_label.c \
	vdev_mirror.c \
	vdev_missing.c \
	vdev_queue.c \
	vdev_raidz.c \
	vdev_raidz_math_aarch64_neon.c \
	vdev_raidz_math_aarch64_neonx2.c \
	vdev_raidz_math_avx2.c \
	vdev_raidz_math_avx512bw.c \
	vdev_raidz_math_avx512f.c \
	vdev_raidz_math.c \
	vdev_raidz_math_scalar.c \
	vdev_raidz_math_sse2.c \
	vdev_raidz_math_ssse3.c \
	vdev_raidz_math_powerpc_altivec.c \
	vdev_rebuild.c \
	vdev_removal.c \
	vdev_root.c \
	vdev_trim.c \
	zap.c \
	zap_leaf.c \
	zap_micro.c \
	zcp.c \
	zcp_get.c \
	zcp_global.c \
	zcp_iter.c \
	zcp_set.c \
	zcp_synctask.c \
	zfeature.c \
	zfs_byteswap.c \
	zfs_debug.c \
	zfs_fm.c \
	zfs_fuid.c \
	zfs_sa.c \
	zfs_znode.c \
	zfs_ratelimit.c \
	zfs_rlock.c \
	zil.c \
	zio.c \
	zio_checksum.c \
	zio_compress.c \
	zio_crypt.c \
	zio_inject.c \
	zle.c \
	zrlock.c \
	zthr.c
# Embedded Lua interpreter sources (module/lua) used by channel
# programs (zcp).
LUA_C = \
	lapi.c \
	lauxlib.c \
	lbaselib.c \
	lcode.c \
	lcompat.c \
	lcorolib.c \
	lctype.c \
	ldebug.c \
	ldo.c \
	lfunc.c \
	lgc.c \
	llex.c \
	lmem.c \
	lobject.c \
	lopcodes.c \
	lparser.c \
	lstate.c \
	lstring.c \
	lstrlib.c \
	ltable.c \
	ltablib.c \
	ltm.c \
	lvm.c \
	lzio.c
# Sources shipped in the dist tarball from this directory.
dist_libzpool_la_SOURCES = \
	$(USER_C)

# Out-of-directory sources (resolved via VPATH); not distributed from
# here since they live under module/.
nodist_libzpool_la_SOURCES = \
	$(KERNEL_C) \
	$(LUA_C)
libzpool_la_LIBADD = \
	$(abs_top_builddir)/lib/libicp/libicp.la \
	$(abs_top_builddir)/lib/libunicode/libunicode.la \
	$(abs_top_builddir)/lib/libzfs_core/libzfs_core.la \
	$(abs_top_builddir)/lib/libnvpair/libnvpair.la \
	$(abs_top_builddir)/lib/libzstd/libzstd.la

libzpool_la_LIBADD += $(LIBCLOCK_GETTIME) $(ZLIB_LIBS) -ldl -lm

libzpool_la_LDFLAGS = -pthread

# -z defs rejects unresolved symbols at link time, but is incompatible
# with the ASAN runtime's interposed symbols.
if !ASAN_ENABLED
libzpool_la_LDFLAGS += -Wl,-z,defs
endif

if BUILD_FREEBSD
libzpool_la_LIBADD += -lgeom
endif

libzpool_la_LDFLAGS += -version-info 4:0:0

# The altivec raidz math implementation must be compiled with -maltivec;
# both the static (.o) and libtool PIC (.lo) objects need the flag.
if TARGET_CPU_POWERPC
vdev_raidz_math_powerpc_altivec.$(OBJEXT): CFLAGS += -maltivec
vdev_raidz_math_powerpc_altivec.l$(OBJEXT): CFLAGS += -maltivec
endif