mirror_zfs/etc/init.d/zfs.gentoo.in
Richard Yao 9eaf0832ad Improve OpenRC init script
The current zfs OpenRC script's dependencies cause OpenRC to attempt to
unmount ZFS filesystems at shutdown while they are still in use, which
fails. This is a cosmetic issue, but it should still be addressed. It
probably does not affect systems where the rootfs is a legacy
filesystem, but any system with the rootfs on ZFS needs to run the ZFS
init script after the system is ready to shut down filesystems.

OpenRC's shutdown process occurs in the reverse order of the startup
process. Therefore running the ZFS shutdown procedure after filesystems
are ready to be unmounted requires running the startup procedure before
fstab. This patch changes the dependencies of the script to explicitly
run before fstab at boot when the rootfs is ZFS and to run after fstab
at boot whenever the rootfs is not ZFS. This should cover most use
cases.
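
For context, which branch the new dependency logic will take can be
checked with the same OpenRC helper the script itself uses; a minimal
sketch (output and runlevel layout vary by system):

    mountinfo -s /          # prints the rootfs type; "zfs" selects the
                            # "before localmount" branch of depend(),
                            # anything else the "after localmount" branch
    rc-update add zfs boot  # the service must be in the boot runlevel
                            # for either ordering to take effect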

The only cases not covered well by this are systems with legacy root
filesystems where people want to configure fstab to mount a non-ZFS
filesystem off a zvol, and possibly also systems whose pools are stored
on network block devices. The former requires that the ZFS script run
before fstab, which could cause ZFS datasets to mount too early and
appear under the fstab mount points. The latter requires that the ZFS
script run after networking starts, which precludes the ability to store
any system information on ZFS. An additional OpenRC script could be
written to handle non-root pools on network block devices, but that will
depend on user demand and developer time.
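
Purely as an illustration of what such an additional script might look
like (nothing below is part of this change; the service and the
ZFS_NET_POOLS variable are hypothetical), it would need to order itself
after networking and touch only the network-backed pools:

    #!/sbin/runscript
    # Hypothetical sketch only -- not part of this commit.
    # Handles pools whose vdevs live on network block devices (iSCSI, NBD, ...).

    ZPOOL="@sbindir@/zpool"
    ZFS="@sbindir@/zfs"
    ZFS_NET_POOLS=""    # hypothetical: space-separated list of network-backed pools

    depend()
    {
        # The backing block devices only exist once the network is up.
        need net
        after zfs
        keyword -lxc -openvz -prefix -vserver
    }

    start()
    {
        ebegin "Importing network-backed ZFS pools"
        local pool
        for pool in $ZFS_NET_POOLS ; do
            # Scan for devices instead of trusting the cache file, since
            # network device paths may change between boots.
            $ZPOOL import -d /dev/disk/by-id -N "$pool"
        done
        # Mount whatever the imports made available; datasets that are
        # already mounted are skipped.
        $ZFS mount -a
        eend $?
    }

    stop()
    {
        ebegin "Exporting network-backed ZFS pools"
        local pool
        for pool in $ZFS_NET_POOLS ; do
            $ZPOOL export "$pool"
        done
        eend 0
    }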

Signed-off-by: Richard Yao <ryao@cs.stonybrook.edu>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #1479
2013-06-18 17:03:25 -07:00

#!/sbin/runscript
# Copyright 1999-2011 Gentoo Foundation
# Released under the 2-clause BSD license.
# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/files/zfs,v 0.9 2011/04/30 10:13:43 devsk Exp $

if [ -z "$init" ]; then
    # Not interactive
    grep -Eqi 'zfs=off|zfs=no' /proc/cmdline && exit 3
fi

depend()
{
    # Try to allow people to mix and match fstab with ZFS in a way that makes sense.
    if [ "$(mountinfo -s /)" = 'zfs' ]
    then
        before localmount
    else
        after localmount
    fi

    # bootmisc will log to /var which may be a different zfs than root.
    before bootmisc logger

    keyword -lxc -openvz -prefix -vserver
}

ZFS="@sbindir@/zfs"
ZPOOL="@sbindir@/zpool"
ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache"
ZFS_MODULE=zfs

checksystem() {
    if [ ! -c /dev/zfs ]; then
        einfo "Checking if ZFS modules present"
        if ! modinfo zfs > /dev/null 2>&1 ; then
            eerror "$ZFS_MODULE not found. Is the ZFS package installed?"
            return 1
        fi
    fi

    einfo "Checking if zfs userspace tools present"
    if [ ! -x $ZPOOL ]; then
        eerror "$ZPOOL binary not found."
        return 1
    fi
    if [ ! -x $ZFS ]; then
        eerror "$ZFS binary not found."
        return 1
    fi

    return 0
}

start() {
    ebegin "Starting ZFS"
    checksystem || return 1

    # Delay until all required block devices are present.
    udevadm settle

    if [ ! -c /dev/zfs ]; then
        modprobe $ZFS_MODULE
        rv=$?
        if [ $rv -ne 0 ]; then
            eerror "Failed to load the $ZFS_MODULE module, check 'dmesg|tail'."
            eend $rv
            return $rv
        fi
    fi

    # Import all pools described by the cache file, and then mount
    # all filesystems based on their properties.
    if [ -f $ZPOOL_CACHE ]; then
        einfo "Importing ZFS pools"
        # as per fedora script, import can fail if all pools are already imported
        # The check for $rv makes no sense...but someday, it will work right.
        $ZPOOL import -c $ZPOOL_CACHE -aN 2>/dev/null || true
        rv=$?
        if [ $rv -ne 0 ]; then
            eerror "Failed to import not-yet imported pools."
            eend $rv
            return $rv
        fi
    fi

    einfo "Mounting ZFS filesystems"
    $ZFS mount -a
    rv=$?
    if [ $rv -ne 0 ]; then
        eerror "Failed to mount ZFS filesystems."
        eend $rv
        return $rv
    fi

    einfo "Exporting ZFS filesystems"
    $ZFS share -a
    rv=$?
    if [ $rv -ne 0 ]; then
        eerror "Failed to export ZFS filesystems."
        eend $rv
        return $rv
    fi

    eend 0
    return 0
}

stop()
{
    ebegin "Unmounting ZFS filesystems"
    $ZFS umount -a
    rv=$?
    if [ $rv -ne 0 ]; then
        einfo "Some ZFS filesystems not unmounted"
    fi

    # Don't fail if we couldn't umount everything. /usr might be in use.
    eend 0
    return 0
}

status()
{
    # show pool status and list
    $ZPOOL status && echo && $ZPOOL list
}