Shell script good practices changes.

It's considered good practice to:
1) Wrap the variable name in `{}`.
   As in `${variable}` instead of `$variable`.
2) Quote variables by enclosing them in `"`.

Also some minor error message tuning.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Rob Norris <robn@despairlabs.com>
Signed-off-by: Turbo Fredriksson <turbo@bayour.com>
Closes #18000
This commit is contained in:
Turbo Fredriksson 2025-12-01 18:15:04 +00:00 committed by Brian Behlendorf
parent 61ab032ae0
commit d3b447de4e

View File

@ -32,18 +32,18 @@ pre_mountroot()
then
if [ -f "/scripts/local-top" ] || [ -d "/scripts/local-top" ]
then
[ "$quiet" != "y" ] && \
[ "${quiet}" != "y" ] && \
zfs_log_begin_msg "Running /scripts/local-top"
run_scripts /scripts/local-top
[ "$quiet" != "y" ] && zfs_log_end_msg
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
if [ -f "/scripts/local-premount" ] || [ -d "/scripts/local-premount" ]
then
[ "$quiet" != "y" ] && \
[ "${quiet}" != "y" ] && \
zfs_log_begin_msg "Running /scripts/local-premount"
run_scripts /scripts/local-premount
[ "$quiet" != "y" ] && zfs_log_end_msg
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
fi
}
@ -60,10 +60,10 @@ disable_plymouth()
# Get a ZFS filesystem property value.
get_fs_value()
{
get_fs="$1"
get_value=$2
get_fs="${1}"
get_value="${2}"
"${ZFS}" get -H -ovalue "$get_value" "$get_fs" 2> /dev/null
"${ZFS}" get -H -ovalue "${get_value}" "${get_fs}" 2> /dev/null
}
# Find the 'bootfs' property on pool $1.
@ -71,7 +71,7 @@ get_fs_value()
# pool by exporting it again.
find_rootfs()
{
find_rootfs_pool="$1"
find_rootfs_pool="${1}"
# If 'POOL_IMPORTED' isn't set, no pool imported and therefore
# we won't be able to find a root fs.
@ -85,7 +85,7 @@ find_rootfs()
# Not set, try to find it in the 'bootfs' property of the pool.
# NOTE: zpool does not support 'get -H -ovalue bootfs'...
ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$find_rootfs_pool")
ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "${find_rootfs_pool}")
# Make sure it's not '-' and that it starts with /.
if [ "${ZFS_BOOTFS}" != "-" ] && \
@ -97,7 +97,7 @@ find_rootfs()
fi
# Not boot fs here, export it and later try again..
"${ZPOOL}" export "$find_rootfs_pool"
"${ZPOOL}" export "${find_rootfs_pool}"
POOL_IMPORTED=
ZFS_BOOTFS=
return 1
@ -106,7 +106,7 @@ find_rootfs()
# Support function to get a list of all pools, separated with ';'
find_pools()
{
find_pools=$("$@" 2> /dev/null | \
find_pools=$("${@}" 2> /dev/null | \
sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | \
tr '\n' ';')
@ -117,98 +117,98 @@ find_pools()
get_pools()
{
if [ -n "${ZFS_POOL_IMPORT}" ]; then
echo "$ZFS_POOL_IMPORT"
echo "${ZFS_POOL_IMPORT}"
return 0
fi
# Get the base list of available pools.
available_pools=$(find_pools "$ZPOOL" import)
available_pools="$(find_pools "${ZPOOL}" import)"
# Just in case - seen it happen (that a pool isn't visible/found
# with a simple "zpool import" but only when using the "-d"
# option or setting ZPOOL_IMPORT_PATH).
if [ -d "/dev/disk/by-id" ]
then
npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
if [ -n "$npools" ]
npools="$(find_pools "${ZPOOL}" import -d /dev/disk/by-id)"
if [ -n "${npools}" ]
then
# Because we have found extra pool(s) here, which wasn't
# found 'normally', we need to force USE_DISK_BY_ID to
# make sure we're able to actually import it/them later.
USE_DISK_BY_ID='yes'
if [ -n "$available_pools" ]
if [ -n "${available_pools}" ]
then
# Filter out duplicates (pools found with the simple
# "zpool import" but which is also found with the
# "zpool import -d ...").
npools=$(echo "$npools" | sed "s,$available_pools,,")
npools="$(echo "${npools}" | sed "s,${available_pools},,")"
# Add the list to the existing list of
# available pools
available_pools="$available_pools;$npools"
available_pools="${available_pools};${npools}"
else
available_pools="$npools"
available_pools="${npools}"
fi
fi
fi
# Filter out any exceptions...
if [ -n "$ZFS_POOL_EXCEPTIONS" ]
if [ -n "${ZFS_POOL_EXCEPTIONS}" ]
then
found=""
apools=""
OLD_IFS="$IFS" ; IFS=";"
OLD_IFS="${IFS}" ; IFS=";"
for pool in $available_pools
for pool in ${available_pools}
do
for exception in $ZFS_POOL_EXCEPTIONS
for exception in ${ZFS_POOL_EXCEPTIONS}
do
[ "$pool" = "$exception" ] && continue 2
found="$pool"
[ "${pool}" = "${exception}" ] && continue 2
found="${pool}"
done
if [ -n "$found" ]
if [ -n "${found}" ]
then
if [ -n "$apools" ]
if [ -n "${apools}" ]
then
apools="$apools;$pool"
apools="${apools};${pool}"
else
apools="$pool"
apools="${pool}"
fi
fi
done
IFS="$OLD_IFS"
available_pools="$apools"
IFS="${OLD_IFS}"
available_pools="${apools}"
fi
# Return list of available pools.
echo "$available_pools"
echo "${available_pools}"
}
# Import given pool $1
import_pool()
{
import_pool="$1"
import_pool="${1}"
# Verify that the pool isn't already imported
# Make as sure as we can to not require '-f' to import.
"${ZPOOL}" get -H -o value name,guid 2>/dev/null | grep -Fxq "$import_pool" && return 0
"${ZPOOL}" get -H -o value name,guid 2>/dev/null | grep -Fxq "${import_pool}" && return 0
# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
# to something we can use later with the real import(s). We want to
# make sure we find all by* dirs, BUT by-vdev should be first (if it
# exists).
if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ]
if [ -n "${USE_DISK_BY_ID}" ] && [ -z "${ZPOOL_IMPORT_PATH}" ]
then
dirs="$(for dir in /dev/disk/by-*
do
# Ignore by-vdev here - we want it first!
echo "$dir" | grep -q /by-vdev && continue
[ ! -d "$dir" ] && continue
echo "${dir}" | grep -q /by-vdev && continue
[ ! -d "${dir}" ] && continue
printf "%s" "$dir:"
printf "%s" "${dir}:"
done | sed 's,:$,,g')"
if [ -d "/dev/disk/by-vdev" ]
@ -218,50 +218,50 @@ import_pool()
fi
# ... and /dev at the very end, just for good measure.
ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
ZPOOL_IMPORT_PATH="${ZPOOL_IMPORT_PATH}${dirs}:/dev"
fi
# Needs to be exported for "zpool" to catch it.
[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH
[ -n "${ZPOOL_IMPORT_PATH}" ] && export ZPOOL_IMPORT_PATH
[ "$quiet" != "y" ] && zfs_log_begin_msg \
[ "${quiet}" != "y" ] && zfs_log_begin_msg \
"Importing pool '${import_pool}' using defaults"
ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
ZFS_STDERR="$($ZFS_CMD "$import_pool" 2>&1)"
ZFS_ERROR="$?"
ZFS_STDERR="$(${ZFS_CMD} "${import_pool}" 2>&1)"
ZFS_ERROR="${?}"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
if [ -f "${ZPOOL_CACHE}" ]
then
[ "$quiet" != "y" ] && zfs_log_begin_msg \
[ "${quiet}" != "y" ] && zfs_log_begin_msg \
"Importing pool '${import_pool}' using cachefile."
ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
ZFS_STDERR="$($ZFS_CMD "$import_pool" 2>&1)"
ZFS_ERROR="$?"
ZFS_STDERR="$(${ZFS_CMD} "${import_pool}" 2>&1)"
ZFS_ERROR="${?}"
fi
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: ${ZFS_CMD} '$import_pool'"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo "Command: ${ZFS_CMD} '${import_pool}'"
echo "Message: ${ZFS_STDERR}"
echo "Error: ${ZFS_ERROR}"
echo ""
echo "Failed to import pool '$import_pool'."
echo "Failed to import pool '${import_pool}'."
echo "Manually import the pool and exit."
shell
fi
fi
[ "$quiet" != "y" ] && zfs_log_end_msg
[ "${quiet}" != "y" ] && zfs_log_end_msg
POOL_IMPORTED=1
return 0
@ -274,14 +274,13 @@ load_module_initrd()
{
ZFS_INITRD_PRE_MOUNTROOT_SLEEP=${ROOTDELAY:-0}
if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ]; then
[ "$quiet" != "y" ] && zfs_log_begin_msg "Delaying for up to '${ZFS_INITRD_PRE_MOUNTROOT_SLEEP}' seconds."
if [ "${ZFS_INITRD_PRE_MOUNTROOT_SLEEP}" -gt 0 ]; then
[ "${quiet}" != "y" ] && zfs_log_begin_msg "Delaying for up to '${ZFS_INITRD_PRE_MOUNTROOT_SLEEP}' seconds."
fi
START=$(/bin/date -u +%s)
END=$((START+ZFS_INITRD_PRE_MOUNTROOT_SLEEP))
START="$(/bin/date -u +%s)"
END="$((START+ZFS_INITRD_PRE_MOUNTROOT_SLEEP))"
while true; do
# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
if command -v wait_for_udev > /dev/null 2>&1 ; then
wait_for_udev 10
@ -303,24 +302,24 @@ load_module_initrd()
ret=1
fi
[ "$(/bin/date -u +%s)" -gt "$END" ] && break
[ "$(/bin/date -u +%s)" -gt "${END}" ] && break
sleep 1
done
if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ]; then
[ "$quiet" != "y" ] && zfs_log_end_msg
if [ "${ZFS_INITRD_PRE_MOUNTROOT_SLEEP}" -gt 0 ]; then
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
[ "$ret" -ne 0 ] && return 1
[ "${ret}" -ne 0 ] && return 1
if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null
if [ "${ZFS_INITRD_POST_MODPROBE_SLEEP}" -gt 0 ] 2>/dev/null
then
if [ "$quiet" != "y" ]; then
if [ "${quiet}" != "y" ]; then
zfs_log_begin_msg "Sleeping for" \
"$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
"${ZFS_INITRD_POST_MODPROBE_SLEEP} seconds..."
fi
sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
[ "$quiet" != "y" ] && zfs_log_end_msg
sleep "${ZFS_INITRD_POST_MODPROBE_SLEEP}"
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
return 0
@ -329,71 +328,71 @@ load_module_initrd()
# Mount a given filesystem
mount_fs()
{
mount_fs="$1"
mount_fs="${1}"
# Check that the filesystem exists
"${ZFS}" list -oname -tfilesystem -H "${mount_fs}" > /dev/null 2>&1 || return 1
# Skip filesystems with canmount=off. The root fs should not have
# canmount=off, but ignore it for backwards compatibility just in case.
if [ "$mount_fs" != "${ZFS_BOOTFS}" ]
if [ "${mount_fs}" != "${ZFS_BOOTFS}" ]
then
canmount=$(get_fs_value "$mount_fs" canmount)
[ "$canmount" = "off" ] && return 0
canmount="$(get_fs_value "${mount_fs}" canmount)"
[ "${canmount}" = "off" ] && return 0
fi
# Need the _original_ datasets mountpoint!
mountpoint=$(get_fs_value "$mount_fs" mountpoint)
mountpoint="$(get_fs_value "${mount_fs}" mountpoint)"
ZFS_CMD="mount.zfs -o zfsutil"
if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ]; then
if [ "${mountpoint}" = "legacy" ] || [ "${mountpoint}" = "none" ]; then
# Can't use the mountpoint property. Might be one of our
# clones. Check the 'org.zol:mountpoint' property set in
# clone_snap() if that's usable.
mountpoint1=$(get_fs_value "$mount_fs" org.zol:mountpoint)
if [ "$mountpoint1" = "legacy" ] ||
[ "$mountpoint1" = "none" ] ||
[ "$mountpoint1" = "-" ]
mountpoint1="$(get_fs_value "${mount_fs}" org.zol:mountpoint)"
if [ "${mountpoint1}" = "legacy" ] ||
[ "${mountpoint1}" = "none" ] ||
[ "${mountpoint1}" = "-" ]
then
if [ "$mount_fs" != "${ZFS_BOOTFS}" ]; then
if [ "${mount_fs}" != "${ZFS_BOOTFS}" ]; then
# We don't have a proper mountpoint and this
# isn't the root fs.
return 0
fi
if [ "$mountpoint" = "legacy" ]; then
if [ "${mountpoint}" = "legacy" ]; then
ZFS_CMD="mount.zfs"
fi
# Last hail-mary: Hope 'rootmnt' is set!
mountpoint=""
else
mountpoint="$mountpoint1"
mountpoint="${mountpoint1}"
fi
fi
# Possibly decrypt a filesystem using native encryption.
decrypt_fs "$mount_fs"
decrypt_fs "${mount_fs}"
[ "$quiet" != "y" ] && \
[ "${quiet}" != "y" ] && \
zfs_log_begin_msg "Mounting '${mount_fs}' on '${rootmnt}/${mountpoint}'"
[ -n "${ZFS_DEBUG}" ] && \
zfs_log_begin_msg "CMD: '$ZFS_CMD ${mount_fs} ${rootmnt}/${mountpoint}'"
zfs_log_begin_msg "CMD: '${ZFS_CMD} ${mount_fs} ${rootmnt}/${mountpoint}'"
ZFS_STDERR=$(${ZFS_CMD} "${mount_fs}" "${rootmnt}/${mountpoint}" 2>&1)
ZFS_ERROR=$?
ZFS_STDERR="$(${ZFS_CMD} "${mount_fs}" "${rootmnt}/${mountpoint}" 2>&1)"
ZFS_ERROR="${?}"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: ${ZFS_CMD} ${mount_fs} ${rootmnt}/${mountpoint}"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo "Message: ${ZFS_STDERR}"
echo "Error: ${ZFS_ERROR}"
echo ""
echo "Failed to mount ${mount_fs} on ${rootmnt}/${mountpoint}."
echo "Manually mount the filesystem and exit."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
return 0
@ -402,10 +401,10 @@ mount_fs()
# Unlock a ZFS native encrypted filesystem.
decrypt_fs()
{
decrypt_fs="$1"
decrypt_fs="${1}"
# If pool encryption is active and the zfs command understands '-o encryption'
if [ "$(zpool list -H -o feature@encryption "${decrypt_fs%%/*}")" = 'active' ]; then
if [ "$("${ZPOOL}" list -H -o feature@encryption "${decrypt_fs%%/*}")" = 'active' ]; then
# Determine dataset that holds key for root dataset
ENCRYPTIONROOT="$(get_fs_value "${decrypt_fs}" encryptionroot)"
@ -417,28 +416,28 @@ decrypt_fs()
if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
# Continue only if the key needs to be loaded
[ "$KEYSTATUS" = "unavailable" ] || return 0
[ "${KEYSTATUS}" = "unavailable" ] || return 0
# Try extensions first
for f in "/etc/zfs/initramfs-tools-load-key" "/etc/zfs/initramfs-tools-load-key.d/"*; do
[ -r "$f" ] || continue
(. "$f") && {
for key in "/etc/zfs/initramfs-tools-load-key" "/etc/zfs/initramfs-tools-load-key.d/"*; do
[ -r "${key}" ] || continue
(. "${key}") && {
# Successful return and actually-loaded key: we're done
KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
[ "$KEYSTATUS" = "unavailable" ] || return 0
[ "${KEYSTATUS}" = "unavailable" ] || return 0
}
done
# Do not prompt if key is stored noninteractively,
if ! [ "${KEYLOCATION}" = "prompt" ]; then
$ZFS load-key "${ENCRYPTIONROOT}"
"${ZFS}" load-key "${ENCRYPTIONROOT}"
# Prompt with plymouth, if active
elif /bin/plymouth --ping 2>/dev/null; then
echo "plymouth" > /run/zfs_console_askpwd_cmd
for _ in 1 2 3; do
plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
$ZFS load-key "${ENCRYPTIONROOT}" && break
"${ZFS}" load-key "${ENCRYPTIONROOT}" && break
done
# Prompt with systemd, if active
@ -446,7 +445,7 @@ decrypt_fs()
echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
for _ in 1 2 3; do
systemd-ask-password --no-tty "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
$ZFS load-key "${ENCRYPTIONROOT}" && break
"${ZFS}" load-key "${ENCRYPTIONROOT}" && break
done
# Prompt with ZFS tty, otherwise
@ -455,8 +454,8 @@ decrypt_fs()
echo "load-key" > /run/zfs_console_askpwd_cmd
read -r storeprintk _ < /proc/sys/kernel/printk
echo 7 > /proc/sys/kernel/printk
$ZFS load-key "${ENCRYPTIONROOT}"
echo "$storeprintk" > /proc/sys/kernel/printk
"${ZFS}" load-key "${ENCRYPTIONROOT}"
echo "${storeprintk}" > /proc/sys/kernel/printk
fi
fi
fi
@ -467,30 +466,30 @@ decrypt_fs()
# Destroy a given filesystem.
destroy_fs()
{
destroy_fs="$1"
destroy_fs="${1}"
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Destroying '$destroy_fs'"
[ "${quiet}" != "y" ] && \
zfs_log_begin_msg "Destroying '${destroy_fs}'"
ZFS_CMD="${ZFS} destroy $destroy_fs"
ZFS_CMD="${ZFS} destroy ${destroy_fs}"
ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
ZFS_ERROR="$?"
ZFS_ERROR="${?}"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo "Command: ${ZFS_CMD}"
echo "Message: ${ZFS_STDERR}"
echo "Error: ${ZFS_ERROR}"
echo ""
echo "Failed to destroy '$destroy_fs'. Please make sure that '$destroy_fs' is not available."
echo "Hint: Try: zfs destroy -Rfn $destroy_fs"
echo "Failed to destroy '${destroy_fs}'. Please make sure that it is not available."
echo "Hint: Try: zfs destroy -Rfn ${destroy_fs}"
echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
return 0
@ -502,11 +501,11 @@ destroy_fs()
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
clone_snap="$1"
clone_destfs="$2"
clone_mountpoint="$3"
clone_snap="${1}"
clone_destfs="${2}"
clone_mountpoint="${3}"
[ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$clone_snap' to '$clone_destfs'"
[ "${quiet}" != "y" ] && zfs_log_begin_msg "Cloning '${clone_snap}' to '${clone_destfs}'"
# Clone the snapshot into a dataset we can boot from
# + We don't want this filesystem to be automatically mounted, we
@ -515,25 +514,25 @@ clone_snap()
# We use the 'org.zol:mountpoint' property to remember the mountpoint.
ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${clone_mountpoint}"
ZFS_CMD="${ZFS_CMD} $clone_snap $clone_destfs"
ZFS_CMD="${ZFS_CMD} ${clone_snap} ${clone_destfs}"
ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
ZFS_ERROR="$?"
ZFS_ERROR="${?}"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo "Command: ${ZFS_CMD}"
echo "Message: ${ZFS_STDERR}"
echo "Error: ${ZFS_ERROR}"
echo ""
echo "Failed to clone snapshot."
echo "Make sure that any problems are corrected and then make sure"
echo "that the dataset '$clone_destfs' exists and is bootable."
echo "that the dataset '${clone_destfs}' exists and is bootable."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
return 0
@ -542,27 +541,27 @@ clone_snap()
# Rollback a given snapshot.
rollback_snap()
{
rollback_snap="$1"
rollback_snap="${1}"
[ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $rollback_snap"
[ "${quiet}" != "y" ] && zfs_log_begin_msg "Rollback ${rollback_snap}"
ZFS_CMD="${ZFS} rollback -Rf $rollback_snap"
ZFS_CMD="${ZFS} rollback -Rf ${rollback_snap}"
ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
ZFS_ERROR="$?"
ZFS_ERROR="${?}"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
[ "${quiet}" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo "Command: ${ZFS_CMD}"
echo "Message: ${ZFS_STDERR}"
echo "Error: ${ZFS_ERROR}"
echo ""
echo "Failed to rollback snapshot."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
return 0
@ -572,7 +571,7 @@ rollback_snap()
# to the user to choose from.
ask_user_snap()
{
ask_snap="$1"
ask_snap="${1}"
# We need to temporarily disable debugging. Set 'debug' so we
# remember to enabled it again.
@ -590,18 +589,18 @@ ask_user_snap()
" set -- $("${ZFS}" list -H -oname -tsnapshot -r "${ask_snap}")
i=1
for snap in "$@"; do
echo " $i: $snap"
i=$((i + 1))
for snap in "${@}"; do
echo " ${i}: ${snap}"
i="$((i + 1))"
done > /dev/stderr
# expr instead of test here because [ a -lt 0 ] errors out,
# but expr falls back to lexicographical, which works out right
snapnr=0
while expr "$snapnr" "<" 1 > /dev/null ||
expr "$snapnr" ">" "$#" > /dev/null
while expr "${snapnr}" "<" 1 > /dev/null ||
expr "${snapnr}" ">" "${#}" > /dev/null
do
printf "%s" "Snap nr [1-$#]? " > /dev/stderr
printf "%s" "Snap nr [1-${#}]? " > /dev/stderr
read -r snapnr
done
@ -611,12 +610,12 @@ ask_user_snap()
set -x
fi
eval echo '$'"$snapnr"
eval echo '$'"${snapnr}"
}
setup_snapshot_booting()
{
boot_snap="$1"
boot_snap="${1}"
retval=0
# Make sure that the snapshot specified actually exists.
@ -627,7 +626,7 @@ setup_snapshot_booting()
snap="$(ask_user_snap "${boot_snap%%@*}")"
fi
# Separate the full snapshot ('$snap') into it's filesystem and
# Separate the full snapshot ('${snap}') into it's filesystem and
# snapshot names. Would have been nice with a split() function..
rootfs="${boot_snap%%@*}"
snapname="${boot_snap##*@}"
@ -639,9 +638,9 @@ setup_snapshot_booting()
# already exists, destroy it. Recursively
if [ -n "$(get_fs_value "${rootfs}_${snapname}" type)" ]
then
filesystems=$("${ZFS}" list -oname -tfilesystem -H \
-r -Sname "${ZFS_BOOTFS}")
for fs in $filesystems; do
filesystems="$("${ZFS}" list -oname -tfilesystem -H \
-r -Sname "${ZFS_BOOTFS}")"
for fs in ${filesystems}; do
destroy_fs "${boot_snap}"
done
fi
@ -655,7 +654,7 @@ setup_snapshot_booting()
if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
then
# Rollback snapshot
rollback_snap "$s" || retval=$((retval + 1))
rollback_snap "${s}" || retval="$((retval + 1))"
ZFS_BOOTFS="${rootfs}"
else
# Setup a destination filesystem name.
@ -664,18 +663,18 @@ setup_snapshot_booting()
# rpool/ROOT/debian/boot@snap2 => rpool/ROOT/debian_snap2/boot
# rpool/ROOT/debian/usr@snap2 => rpool/ROOT/debian_snap2/usr
# rpool/ROOT/debian/var@snap2 => rpool/ROOT/debian_snap2/var
subfs="${s##"$rootfs"}"
subfs="${subfs%%@"$snapname"}"
subfs="${s##"${rootfs}"}"
subfs="${subfs%%@"${snapname}"}"
destfs="${rootfs}_${snapname}" # base fs.
[ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.
[ -n "${subfs}" ] && destfs="${destfs}${subfs}" # + sub fs.
# Get the mountpoint of the filesystem, to be used
# with clone_snap(). If legacy or none, then use
# the sub fs value.
mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
if [ "$mountpoint" = "legacy" ] || \
[ "$mountpoint" = "none" ]
mountpoint="$(get_fs_value "${s%%@*}" mountpoint)"
if [ "${mountpoint}" = "legacy" ] || \
[ "${mountpoint}" = "none" ]
then
if [ -n "${subfs}" ]; then
mountpoint="${subfs}"
@ -686,8 +685,8 @@ setup_snapshot_booting()
# Clone the snapshot into its own
# filesystem
clone_snap "$s" "${destfs}" "${mountpoint}" || \
retval=$((retval + 1))
clone_snap "${s}" "${destfs}" "${mountpoint}" || \
retval="$((retval + 1))"
fi
done
@ -740,11 +739,11 @@ mountroot()
# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
# 'root' is for Redhat/Fedora (etc),
# 'REAL_ROOT' is for Gentoo
if [ -z "$ROOT" ]
if [ -z "${ROOT}" ]
then
[ -n "$root" ] && ROOT=${root}
[ -n "${root}" ] && ROOT="${root}"
[ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
[ -n "${REAL_ROOT}" ] && ROOT="${REAL_ROOT}"
fi
# ------------
@ -752,18 +751,18 @@ mountroot()
# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
# 'NEWROOT' is for RedHat/Fedora (etc),
# 'NEW_ROOT' is for Gentoo
if [ -z "$rootmnt" ]
if [ -z "${rootmnt}" ]
then
[ -n "$NEWROOT" ] && rootmnt=${NEWROOT}
[ -n "${NEWROOT}" ] && rootmnt="${NEWROOT}"
[ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
[ -n "${NEW_ROOT}" ] && rootmnt="${NEW_ROOT}"
fi
# ------------
# No longer set in the defaults file, but it could have been set in
# get_pools() in some circumstances. If it's something, but not 'yes',
# it's no good to us.
[ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] && \
[ -n "${USE_DISK_BY_ID}" ] && [ "${USE_DISK_BY_ID}" != 'yes' ] && \
unset USE_DISK_BY_ID
# ----------------------------------------------------------------
@ -803,19 +802,19 @@ mountroot()
# ------------
# Look for 'rpool' and 'bootfs' parameter
[ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
[ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"
[ -n "${rpool}" ] && ZFS_RPOOL="${rpool#rpool=}"
[ -n "${bootfs}" ] && ZFS_BOOTFS="${bootfs#bootfs=}"
# ------------
# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
# 'ROOT'
[ -n "$ROOT" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"
[ -n "${ROOT}" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="${ROOT}"
# ------------
# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
# NOTE: Only use the pool name and dataset. The rest is not
# supported by OpenZFS (whatever it's for).
if [ -z "$ZFS_RPOOL" ]
if [ -z "${ZFS_RPOOL}" ]
then
# The ${zfs-bootfs} variable is set at the kernel command
# line, usually by GRUB, but it cannot be referenced here
@ -826,12 +825,12 @@ mountroot()
# stripping the zfs-bootfs= prefix. Let the shell handle
# quoting through the eval command:
# shellcheck disable=SC2046
eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
eval ZFS_RPOOL="$(set | sed -n -e 's,^zfs-bootfs=,,p')"
fi
# ------------
# No root fs or pool specified - do auto detect.
if [ -z "$ZFS_RPOOL" ] && [ -z "${ZFS_BOOTFS}" ]
if [ -z "${ZFS_RPOOL}" ] && [ -z "${ZFS_BOOTFS}" ]
then
# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
# which will be caught later
@ -842,7 +841,7 @@ mountroot()
# F I N D A N D I M P O R T C O R R E C T P O O L
# ------------
if [ "$ROOT" = "zfs:AUTO" ]
if [ "${ROOT}" = "zfs:AUTO" ]
then
# Try to detect both pool and root fs.
@ -851,29 +850,29 @@ mountroot()
# this says "zfs:AUTO" here and interferes with checks later
ZFS_BOOTFS=
[ "$quiet" != "y" ] && \
[ "${quiet}" != "y" ] && \
zfs_log_begin_msg "Attempting to import additional pools."
# Get a list of pools available for import
if [ -n "$ZFS_RPOOL" ]
if [ -n "${ZFS_RPOOL}" ]
then
# We've specified a pool - check only that
POOLS=$ZFS_RPOOL
POOLS="${ZFS_RPOOL}"
else
POOLS=$(get_pools)
POOLS="$(get_pools)"
fi
OLD_IFS="$IFS" ; IFS=";"
for pool in $POOLS
OLD_IFS="${IFS}" ; IFS=";"
for pool in ${POOLS}
do
[ -z "$pool" ] && continue
[ -z "${pool}" ] && continue
IFS="$OLD_IFS" import_pool "$pool"
IFS="$OLD_IFS" find_rootfs "$pool" && break
IFS="${OLD_IFS}" import_pool "${pool}"
IFS="${OLD_IFS}" find_rootfs "${pool}" && break
done
IFS="$OLD_IFS"
IFS="${OLD_IFS}"
[ "$quiet" != "y" ] && zfs_log_end_msg "$ZFS_ERROR"
[ "${quiet}" != "y" ] && zfs_log_end_msg "${ZFS_ERROR}"
else
# No auto - use value from the command line option.
@ -885,15 +884,15 @@ mountroot()
fi
# Import the pool (if not already done so in the AUTO check above).
if [ -n "$ZFS_RPOOL" ] && [ -z "${POOL_IMPORTED}" ]
if [ -n "${ZFS_RPOOL}" ] && [ -z "${POOL_IMPORTED}" ]
then
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"
[ "${quiet}" != "y" ] && \
zfs_log_begin_msg "Importing ZFS root pool '${ZFS_RPOOL}'"
import_pool "${ZFS_RPOOL}"
find_rootfs "${ZFS_RPOOL}"
[ "$quiet" != "y" ] && zfs_log_end_msg
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
if [ -z "${POOL_IMPORTED}" ]
@ -901,9 +900,9 @@ mountroot()
# No pool imported, this is serious!
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo "Command: ${ZFS_CMD}"
echo "Message: ${ZFS_STDERR}"
echo "Error: ${ZFS_ERROR}"
echo ""
echo "No pool imported. Manually import the root pool"
echo "at the command prompt and then exit."
@ -914,10 +913,10 @@ mountroot()
# In case the pool was specified as guid, resolve guid to name
pool="$("${ZPOOL}" get -H -o name,value name,guid | \
awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
if [ -n "$pool" ]; then
# If $ZFS_BOOTFS contains guid, replace the guid portion with $pool
ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
sed -e "s/$("${ZPOOL}" get -H -o value guid "$pool")/$pool/g")
if [ -n "${pool}" ]; then
# If ${ZFS_BOOTFS} contains guid, replace the guid portion with ${pool}.
ZFS_BOOTFS="$(echo "${ZFS_BOOTFS}" | \
sed -e "s/$("${ZPOOL}" get -H -o value guid "${pool}")/${pool}/g")"
ZFS_RPOOL="${pool}"
fi
@ -942,8 +941,8 @@ mountroot()
echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
echo " not specified on the kernel command line."
echo ""
echo "Manually mount the root filesystem on $rootmnt and then exit."
echo "Hint: Try: mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
echo "Manually mount the root filesystem on ${rootmnt} and then exit."
echo "Hint: Try: mount -o zfsutil -t zfs ${ZFS_RPOOL:-rpool}/ROOT/system ${rootmnt}"
shell
fi
@ -952,7 +951,7 @@ mountroot()
# * Ideally, the root filesystem would be mounted like this:
#
# zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
# zpool import -R "${rootmnt}" -N "${ZFS_RPOOL}"
# zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
#
# but the MOUNTPOINT prefix is preserved on descendent filesystem
@ -967,14 +966,14 @@ mountroot()
# Go through the complete list (recursively) of all filesystems below
# the real root dataset
filesystems="$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")"
OLD_IFS="$IFS" ; IFS="
OLD_IFS="${IFS}" ; IFS="
"
for fs in $filesystems; do
IFS="$OLD_IFS" mount_fs "$fs"
for fs in ${filesystems}; do
IFS="${OLD_IFS}" mount_fs "${fs}"
done
IFS="$OLD_IFS"
for fs in $ZFS_INITRD_ADDITIONAL_DATASETS; do
mount_fs "$fs"
IFS="${OLD_IFS}"
for fs in ${ZFS_INITRD_ADDITIONAL_DATASETS}; do
mount_fs "${fs}"
done
touch /run/zfs_unlock_complete
@ -1001,8 +1000,8 @@ mountroot()
printf "%s" " 'c' for shell, 'r' for reboot, 'ENTER' to continue. "
read -r b
[ "$b" = "c" ] && /bin/sh
[ "$b" = "r" ] && reboot -f
[ "${b}" = "c" ] && /bin/sh
[ "${b}" = "r" ] && reboot -f
set +x
fi
@ -1013,10 +1012,10 @@ mountroot()
then
if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ]
then
[ "$quiet" != "y" ] && \
[ "${quiet}" != "y" ] && \
zfs_log_begin_msg "Running /scripts/local-bottom"
run_scripts /scripts/local-bottom
[ "$quiet" != "y" ] && zfs_log_end_msg
[ "${quiet}" != "y" ] && zfs_log_end_msg
fi
fi
}