# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
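#
# Example (illustrative pool and dataset names):
#
#	boot=zfs rpool=rpool bootfs=rpool/ROOT/debian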
#

# Source the common init script
. /etc/zfs/zfs-functions

# Paths to what we need - in the initrd, these paths are hardcoded,
# so override the defines in zfs-functions.
ZFS="/sbin/zfs"
ZPOOL="/sbin/zpool"
ZPOOL_CACHE="/etc/zfs/zpool.cache"
export ZFS ZPOOL ZPOOL_CACHE

# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
	if type run_scripts > /dev/null 2>&1 && \
	    [ -f "/scripts/local-top" -o -d "/scripts/local-top" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Running /scripts/local-top"
		run_scripts /scripts/local-top
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	if type run_scripts > /dev/null 2>&1 && \
	    [ -f "/scripts/local-premount" -o -d "/scripts/local-premount" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Running /scripts/local-premount"
		run_scripts /scripts/local-premount
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi
}

# If plymouth is available, hide the splash image.
disable_plymouth()
{
	if [ -x /bin/plymouth ] && /bin/plymouth --ping
	then
		/bin/plymouth hide-splash >/dev/null 2>&1
	fi
}

# Get a ZFS filesystem property value.
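# Usage (illustrative): get_fs_value rpool/ROOT/debian mountpoint => '/'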
get_fs_value()
{
	local fs="$1"
	local value=$2

	"${ZFS}" get -H -ovalue $value "$fs" 2> /dev/null
}

# Find the 'bootfs' property on pool $1.
# If the property is unset, or the dataset's mountpoint isn't '/',
# ignore this pool by exporting it again.
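#
# Example (illustrative): 'zpool list -H -obootfs rpool' prints either a
# dataset name such as 'rpool/ROOT/debian', or '-' if the property is unset.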
find_rootfs()
{
	local pool="$1"

	# If 'POOL_IMPORTED' isn't set, no pool has been imported and
	# therefore we won't be able to find a root fs.
	[ -z "${POOL_IMPORTED}" ] && return 1

	# If it's already specified, just keep it mounted and exit;
	# the user-supplied value (kernel command line) must be correct.
	[ -n "${ZFS_BOOTFS}" ] && return 0

	# Not set, try to find it in the 'bootfs' property of the pool.
	# NOTE: zpool does not support 'get -H -ovalue bootfs'...
	ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")

	# Make sure it's not '-' and that its mountpoint is '/'.
	if [ "${ZFS_BOOTFS}" != "-" ] && \
		get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
	then
		# Keep it mounted
		POOL_IMPORTED=1
		return 0
	fi

	# No boot fs here; export the pool and try again later.
	"${ZPOOL}" export "$pool"
	POOL_IMPORTED=""

	return 1
}

# Support function to get a list of all pools, separated by ';'
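#
# Example (illustrative): 'zpool import' prints stanzas beginning with
# '  pool: rpool'; the pipeline below extracts the pool names and joins
# them, e.g. 'rpool;tank'.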
find_pools()
{
	local CMD="$*"
	local pools pool

	pools=$($CMD 2> /dev/null | \
		grep -E "pool:|^[a-zA-Z0-9]" | \
		sed 's@.*: @@' | \
		while read pool; do \
		    echo -n "$pool;"
		done)

	echo "${pools%%;}" # Return without the last ';'.
}

# Get a list of all available pools
get_pools()
{
	local available_pools npools

	if [ -n "${ZFS_POOL_IMPORT}" ]; then
		echo "$ZFS_POOL_IMPORT"
		return 0
	fi

	# Get the base list of available pools.
	available_pools=$(find_pools "$ZPOOL" import)

	# Just in case - it has been seen that a pool isn't visible to a
	# plain "zpool import", but only when using the "-d" option or
	# setting ZPOOL_IMPORT_PATH.
	if [ -d "/dev/disk/by-id" ]
	then
		npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
		if [ -n "$npools" ]
		then
			# Because we have found extra pool(s) here that weren't
			# found 'normally', we need to force USE_DISK_BY_ID to
			# make sure we're able to actually import them later.
			USE_DISK_BY_ID='yes'

			if [ -n "$available_pools" ]
			then
				# Filter out duplicates (pools found with the
				# simple "zpool import" which are also found
				# with "zpool import -d ...").
				npools=$(echo "$npools" | sed "s,$available_pools,,")

				# Add the list to the existing list of
				# available pools
				available_pools="$available_pools;$npools"
			else
				available_pools="$npools"
			fi
		fi
	fi

	# Filter out any exceptions...
	if [ -n "$ZFS_POOL_EXCEPTIONS" ]
	then
		local found=""
		local apools=""
		local pool exception
		OLD_IFS="$IFS" ; IFS=";"

		for pool in $available_pools
		do
			for exception in $ZFS_POOL_EXCEPTIONS
			do
				[ "$pool" = "$exception" ] && continue 2
				found="$pool"
			done

			if [ -n "$found" ]
			then
				if [ -n "$apools" ]
				then
					apools="$apools;$pool"
				else
					apools="$pool"
				fi
			fi
		done

		IFS="$OLD_IFS"
		available_pools="$apools"
	fi

	# Return list of available pools.
	echo "$available_pools"
}

# Import given pool $1
import_pool()
{
	local pool="$1"
	local dirs dir

	# Verify that the pool isn't already imported.
	# Be as sure as we can that we won't require '-f' to import.
	"${ZPOOL}" status "$pool" > /dev/null 2>&1 && return 0

	# For backward compatibility, make sure that ZPOOL_IMPORT_PATH is set
	# to something we can use later with the real import(s). We want to
	# make sure we find all by* dirs, BUT by-vdev should be first (if it
	# exists).
	if [ -n "$USE_DISK_BY_ID" -a -z "$ZPOOL_IMPORT_PATH" ]
	then
		dirs="$(for dir in $(echo /dev/disk/by-*)
		do
			# Ignore by-vdev here - we want it first!
			echo "$dir" | grep -q /by-vdev && continue
			[ ! -d "$dir" ] && continue

			echo -n "$dir:"
		done | sed 's,:$,,g')"

		if [ -d "/dev/disk/by-vdev" ]
		then
			# Add by-vdev at the beginning.
			ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
		fi

		# ... and /dev at the very end, just for good measure.
		ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
	fi
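
	# Example result (illustrative, assuming by-vdev and by-id exist):
	#
	#	/dev/disk/by-vdev:/dev/disk/by-id:/dev/disk/by-label:/dev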

	# Needs to be exported for "zpool" to catch it.
	[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH


	[ "$quiet" != "y" ] && zfs_log_begin_msg \
		"Importing pool '${pool}' using defaults"

	ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
	ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		if [ -f "${ZPOOL_CACHE}" ]
		then
			[ "$quiet" != "y" ] && zfs_log_begin_msg \
				"Importing pool '${pool}' using cachefile."

			ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
			ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
			ZFS_ERROR="$?"
		fi

		if [ "${ZFS_ERROR}" != 0 ]
		then
			[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

			disable_plymouth
			echo ""
			echo "Command: ${ZFS_CMD} '$pool'"
			echo "Message: $ZFS_STDERR"
			echo "Error: $ZFS_ERROR"
			echo ""
			echo "Failed to import pool '$pool'."
			echo "Manually import the pool and exit."
			/bin/sh
		fi
	fi

	[ "$quiet" != "y" ] && zfs_log_end_msg

	POOL_IMPORTED=1
	return 0
}

# Load ZFS modules.
# Loading a module in an initrd requires a slightly different approach,
# with more logging etc.
load_module_initrd()
{
	if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" > 0 ]
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
				"$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
	if type wait_for_udev > /dev/null 2>&1 ; then
		wait_for_udev 10
	elif type wait_for_dev > /dev/null 2>&1 ; then
		wait_for_dev
	fi

	# zpool import refuses to import without a valid mtab
	[ ! -f /proc/mounts ] && mount -t proc proc /proc
	[ ! -f /etc/mtab ] && cat /proc/mounts > /etc/mtab

	# Load the module
	load_module "zfs" || return 1

	if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" > 0 ]
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
				"$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Mount a given filesystem
mount_fs()
{
	local fs="$1"
	local mountpoint

	# Check that the filesystem exists
	"${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || \
	    return 1

	# Need the _original_ dataset's mountpoint!
	mountpoint=$(get_fs_value "$fs" mountpoint)
	if [ "$mountpoint" = "legacy" -o "$mountpoint" = "none" ]; then
		# Can't use the mountpoint property. Might be one of our
		# clones. Check if the 'org.zol:mountpoint' property set in
		# clone_snap() is usable.
		mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
		if [ "$mountpoint" = "legacy" -o \
		    "$mountpoint" = "none" -o \
		    "$mountpoint" = "-" ]
		then
			if [ "$fs" != "${ZFS_BOOTFS}" ]; then
				# We don't have a proper mountpoint and this
				# isn't the root fs. So strip the root fs
				# name from the filesystem name, and we should
				# (hopefully!) have a mountpoint we can use.
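				# Example (illustrative):
				#	fs=rpool/ROOT/debian/usr
				#	ZFS_BOOTFS=rpool/ROOT/debian
				#	=> mountpoint=/usr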
				mountpoint="${fs##$ZFS_BOOTFS}"
			else
				# Last hail-mary: Hope 'rootmnt' is set!
				mountpoint=""
			fi
		fi

		if [ "$mountpoint" = "legacy" ]; then
			ZFS_CMD="mount -t zfs"
		else
			# If it's not a legacy filesystem, it can only be a
			# native one...
			ZFS_CMD="mount -o zfsutil -t zfs"
		fi
	else
		ZFS_CMD="mount -o zfsutil -t zfs"
	fi

	# Possibly decrypt a filesystem using native encryption.
	decrypt_fs "$fs"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
	[ -n "${ZFS_DEBUG}" ] && \
	    zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"

	ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
	ZFS_ERROR=$?
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
		echo "Manually mount the filesystem and exit."
		/bin/sh
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Unlock a ZFS natively encrypted filesystem.
decrypt_fs()
{
	local fs="$1"

	# If the 'zfs key' command isn't available, exit right here.
	"${ZFS}" 2>&1 | grep -q 'key -l ' || return 0

	# Check if filesystem is encrypted. If not, exit right here.
	[ "$(get_fs_value "$fs" encryption)" != "off" ] || return 0

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Loading crypto wrapper key for $fs"

	# Just make sure that ALL crypto modules are loaded.
	# Simplest just to load them all...
	for mod in sun-ccm sun-gcm sun-ctr
	do
		[ "$quiet" != "y" ] && zfs_log_progress_msg "${mod} "

		ZFS_CMD="load_module $mod"
		ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
		ZFS_ERROR="$?"

		if [ "${ZFS_ERROR}" != 0 ]
		then
			[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

			disable_plymouth
			echo ""
			echo "Command: $ZFS_CMD"
			echo "Message: $ZFS_STDERR"
			echo "Error: $ZFS_ERROR"
			echo ""
			echo "Failed to load $mod module."
			echo "Please verify that it is availible on the initrd image"
			echo "(without it it won't be possible to unlock the filesystem)"
			echo "and rerun:  $ZFS_CMD"
			/bin/sh
		else
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi
	done

	# If the key isn't available, then this will fail!
	ZFS_CMD="${ZFS} key -l -r $fs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"

	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to load zfs encryption wrapper key (s)."
		echo "Please verify dataset property 'keysource' for datasets"
		echo "and rerun:  $ZFS_CMD"
		/bin/sh
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Destroy a given filesystem.
destroy_fs()
{
	local fs="$1"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Destroying '$fs'"

	ZFS_CMD="${ZFS} destroy $fs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to destroy '$fs'. Please make sure that '$fs' is not availible."
		echo "Hint: Try:  zfs destroy -Rfn $fs"
		echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
		/bin/sh
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Clone snapshot $1 to destination filesystem $2
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
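#
# The resulting command looks like this (illustrative names):
#
#	zfs clone -o canmount=noauto -o mountpoint=none \
#	    -o org.zol:mountpoint=/ rpool/ROOT/debian@snap2 rpool/ROOT/debian_snap2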
clone_snap()
{
	local snap="$1"
	local destfs="$2"
	local mountpoint="$3"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"

	# Clone the snapshot into a dataset we can boot from
	# + We don't want this filesystem to be automatically mounted; we
	#   want control over this here and nowhere else.
	# + We don't need any mountpoint set for the same reason.
	# We use the 'org.zol:mountpoint' property to remember the mountpoint.
	ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
	ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
	ZFS_CMD="${ZFS_CMD} $snap $destfs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to clone snapshot."
		echo "Make sure that the any problems are corrected and then make sure"
		echo "that the dataset '$destfs' exists and is bootable."
		/bin/sh
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Roll back a given snapshot.
rollback_snap()
{
	local snap="$1"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"

	ZFS_CMD="${ZFS} rollback -Rf $snap"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to rollback snapshot."
		/bin/sh
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Get a list of snapshots and present them as a numbered
# list for the user to choose from.
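#
# Example interaction (illustrative):
#
#	What snapshot do you want to boot from?
#	  1: rpool/ROOT/debian@snap1
#	  2: rpool/ROOT/debian@snap2
#	  Snap nr [1-2]? 2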
ask_user_snap()
{
	local fs="$1"
	local i=1
	local SNAP snapnr snap debug

	# We need to temporarily disable debugging. Set 'debug' so we
	# remember to enable it again.
	if [ -n "${ZFS_DEBUG}" ]; then
		unset ZFS_DEBUG
		set +x
		debug=1
	fi

	# Because we need the resulting snapshot, which is sent on
	# stdout to the caller, we use stderr for our questions.
	echo "What snapshot do you want to boot from?" > /dev/stderr
	while read snap; do
	    echo "  $i: ${snap}" > /dev/stderr
	    eval `echo SNAP_$i=$snap`
	    i=$((i + 1))
	done <<EOT
$("${ZFS}" list -H -oname -tsnapshot "${fs}")
EOT

	echo -n "  Snap nr [0-$((i-1))]? " > /dev/stderr
	read snapnr

	# Reenable debugging.
	if [ -n "${debug}" ]; then
		ZFS_DEBUG=1
		set -x
	fi

	echo "$(eval echo "$"SNAP_$snapnr)"
}

# Set up booting from snapshot $1. Either roll the datasets back to the
# snapshot ('rollback=on' on the kernel command line), or clone it - and
# any snapshots of its sub-filesystems - into a new set of datasets to
# boot from.
setup_snapshot_booting()
{
	local snap="$1"
	local s destfs subfs mountpoint retval=0 filesystems fs

	# Make sure that the snapshot specified actually exists.
	if [ -z "$(get_fs_value "${snap}" type)" ]
	then
		# Snapshot does not exist (...@<null> ?), so
		# ask the user for a snapshot to use.
		snap="$(ask_user_snap "${snap%%@*}")"
	fi

	# Separate the full snapshot ('$snap') into its filesystem and
	# snapshot names. A split() function would have been nice...
	rootfs="${snap%%@*}"
	snapname="${snap##*@}"
	ZFS_BOOTFS="${rootfs}_${snapname}"

	if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
	then
		# If the destination dataset for the clone
		# already exists, destroy it recursively.
		if [ -n "$(get_fs_value "${rootfs}_${snapname}" type)" ]; then
			filesystems=$("${ZFS}" list -oname -tfilesystem -H \
			    -r -Sname "${ZFS_BOOTFS}")
			for fs in $filesystems; do
				destroy_fs "${fs}"
			done
		fi
	fi

	# Get all snapshots, recursively (we might need to clone /usr, /var
	# etc as well).
	for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
	    grep "${snapname}")
	do
		if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
		then
			# Rollback snapshot
			rollback_snap "$s" || retval=$((retval + 1))
		else
			# Setup a destination filesystem name.
			# Ex: Called with 'rpool/ROOT/debian@snap2'
			#       rpool/ROOT/debian@snap2		=> rpool/ROOT/debian_snap2
			#       rpool/ROOT/debian/boot@snap2	=> rpool/ROOT/debian_snap2/boot
			#       rpool/ROOT/debian/usr@snap2	=> rpool/ROOT/debian_snap2/usr
			#       rpool/ROOT/debian/var@snap2	=> rpool/ROOT/debian_snap2/var
			subfs="${s##$rootfs}"
			subfs="${subfs%%@$snapname}"

			destfs="${rootfs}_${snapname}" # base fs.
			[ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.

			# Get the mountpoint of the filesystem, to be used
			# with clone_snap(). If legacy or none, then use
			# the sub fs value.
			mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
			if [ "$mountpoint" = "legacy" -o \
			    "$mountpoint" = "none" ]
			then
				if [ -n "${subfs}" ]; then
					mountpoint="${subfs}"
				else
					mountpoint="/"
				fi
			fi

			# Clone the snapshot into its own
			# filesystem
			clone_snap "$s" "${destfs}" "${mountpoint}" || \
			    retval=$((retval + 1))
		fi
	done

	# Return the number of failed rollbacks/clones (zero on success).
	return "${retval}"
}

# ================================================================

# This is the main function.
mountroot()
{
	local snaporig snapsub destfs pool POOLS

	# ----------------------------------------------------------------
	# I N I T I A L   S E T U P

	# ------------
	# Run the pre-mount scripts from /scripts/local-top.
	pre_mountroot

	# ------------
	# Source the default setup variables.
	[ -r '/etc/default/zfs' ] && . /etc/default/zfs

	# ------------
	# Support debug option
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZFS_DEBUG=1
		mkdir -p /var/log
		#exec 2> /var/log/boot.debug
		set -x
	fi

	# ------------
	# Load ZFS module etc.
	if ! load_module_initrd; then
		disable_plymouth
		echo ""
		echo "Failed to load ZFS modules."
		echo "Manually load the modules and exit."
		/bin/sh
	fi

	# ------------
	# Look for the cache file (if any).
	[ ! -f "${ZPOOL_CACHE}" ] && unset ZPOOL_CACHE

	# ------------
	# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
	#		 'root' is for RedHat/Fedora (etc),
	#		 'REAL_ROOT' is for Gentoo
	if [ -z "$ROOT" ]
	then
		[ -n "$root" ] && ROOT=${root}

		[ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
	fi

	# ------------
	# Where to mount the root fs in the initrd - set outside this script
	# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
	#		 'NEWROOT' is for RedHat/Fedora (etc),
	#		 'NEW_ROOT' is for Gentoo
	if [ -z "$rootmnt" ]
	then
		[ -n "$NEWROOT" ] && rootmnt=${NEWROOT}

		[ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
	fi

	# ------------
	# No longer set in the defaults file, but it could have been set in
	# get_pools() in some circumstances. If it's something, but not 'yes',
	# it's no good to us.
	[ -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" != 'yes' ] && \
	    unset USE_DISK_BY_ID

	# ----------------------------------------------------------------
	# P A R S E   C O M M A N D   L I N E   O P T I O N S

	# This is the really ugly part - there are so many options and
	# permutations 'out there', and if we want this to be the 'primary'
	# source for ZFS initrd scripting, we need to support them all.
	#
	# Supports the following kernel command line argument combinations
	# (in this order - first match wins):
	#
	#	rpool=<pool>			(tries to find bootfs automatically)
	#	bootfs=<pool>/<dataset>		(uses this for rpool - first part)
	#	rpool=<pool> bootfs=<pool>/<dataset>
	#	-B zfs-bootfs=<pool>/<fs>	(uses this for rpool - first part)
	#	rpool=rpool			(default if none of the above is used)
	#	root=<pool>/<dataset>		(uses this for rpool - first part)
	#	root=ZFS=<pool>/<dataset>	(uses this for rpool - first part, without 'ZFS=')
	#	root=zfs:AUTO			(tries to detect both pool and rootfs)
	#	root=zfs:<pool>/<dataset>	(uses this for rpool - first part, without 'zfs:')
	#
	# Option <dataset> could also be <snapshot>
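	#
	# Example (illustrative): root=ZFS=rpool/ROOT/debian@snap2 boots from
	# a clone of snapshot 'snap2' (see setup_snapshot_booting() above).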

	# ------------
	# Support force option
	# In addition, setting one of zfs_force, zfs.force or zfsforce to
	# 'yes', 'on' or '1' will make sure we force import the pool.
	# This should (almost) never be needed, but it's here for
	# completeness.
	ZPOOL_FORCE=""
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZPOOL_FORCE="-f"
	fi

	# ------------
	# Look for the 'rpool' and 'bootfs' parameters
	[ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
	[ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"

	# ------------
	# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
	# 'ROOT'
	[ -n "$ROOT" -a -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"

	# ------------
	# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
	# NOTE: Only use the pool name and dataset. The rest is not
	#       supported by ZoL (whatever it's for).
	if [ -z "$ZFS_RPOOL" ]
	then
		# The ${zfs-bootfs} variable is set on the kernel command
		# line, usually by GRUB, but it cannot be referenced here
		# directly because Bourne shell variable names cannot
		# contain a hyphen.
		#
		# Reassign the variable by dumping the environment and
		# stripping the zfs-bootfs= prefix.  Let the shell handle
		# quoting through the eval command.
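		#
		# Example (illustrative): with 'zfs-bootfs=rpool/ROOT/debian'
		# on the command line, the eval below yields
		# ZFS_RPOOL='rpool/ROOT/debian'.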
		eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
	fi

	# ------------
	# No root fs or pool specified - do auto detect.
	if [ -z "$ZFS_RPOOL" -a -z "${ZFS_BOOTFS}" ]
	then
		# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
		# which will be caught later
		ROOT=zfs:AUTO
	fi

	# ----------------------------------------------------------------
	# F I N D   A N D   I M P O R T   C O R R E C T   P O O L

	# ------------
	if [ "$ROOT" = "zfs:AUTO" ]
	then
		# Try to detect both pool and root fs.

		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Attempting to import additional pools."

		# Get a list of pools available for import
		if [ -n "$ZFS_RPOOL" ]
		then
			# We've specified a pool - check only that
			POOLS=$ZFS_RPOOL
		else
			POOLS=$(get_pools)
		fi

		OLD_IFS="$IFS" ; IFS=";"
		for pool in $POOLS
		do
			[ -z "$pool" ] && continue

			import_pool "$pool"
			find_rootfs "$pool"
		done
		IFS="$OLD_IFS"

		[ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
	else
		# No auto - use value from the command line option.

		# Strip 'zfs:' and 'ZFS='.
		ZFS_BOOTFS="${ROOT#*[:=]}"

		# Strip everything after the first slash to get the pool name.
		ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
	fi

	# Import the pool (if not already done so in the AUTO check above).
	if [ -n "$ZFS_RPOOL" -a -z "${POOL_IMPORTED}" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"

		import_pool "${ZFS_RPOOL}"
		find_rootfs "${ZFS_RPOOL}"

		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	if [ -z "${POOL_IMPORTED}" ]
	then
		# No pool imported, this is serious!
		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "No pool imported. Manually import the root pool"
		echo "at the command prompt and then exit."
		echo "Hint: Try:  zpool import -R ${rootmnt} -N ${ZFS_RPOOL}"
		/bin/sh
	fi

	# ----------------------------------------------------------------
	# P R E P A R E   R O O T   F I L E S Y S T E M

	if [ -n "${ZFS_BOOTFS}" ]
	then
		# Booting from a snapshot?
		# Will overwrite the ZFS_BOOTFS variable like so:
		#   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
		echo "${ZFS_BOOTFS}" | grep -q '@' && \
		    setup_snapshot_booting "${ZFS_BOOTFS}"
	fi

	if [ -z "${ZFS_BOOTFS}" ]
	then
		# Still nothing! Let the user sort this out.
		disable_plymouth
		echo ""
		echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
		echo "       not specified on the kernel command line."
		echo ""
		echo "Manually mount the root filesystem on $rootmnt and then exit."
		echo "Hint: Try:  mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
		/bin/sh
	fi

	# ----------------------------------------------------------------
	# M O U N T   F I L E S Y S T E M S

	# * Ideally, the root filesystem would be mounted like this:
	#
	#     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
	#     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
	#
	#   but the MOUNTPOINT prefix is preserved on descendant filesystems
	#   after the pivot into the regular root, which later breaks things
	#   like `zfs mount -a` and the /etc/mtab refresh.
	#
	# * Mount any additional filesystems required, such as /usr, /var
	#   and /usr/local.
	#   NOTE: These are mounted in the order specified in the
	#         ZFS_INITRD_ADDITIONAL_DATASETS variable, so take care!

	# Go through the complete list (recursively) of all filesystems
	# below the real root dataset.
	filesystems=$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")
	for fs in $filesystems $ZFS_INITRD_ADDITIONAL_DATASETS
	do
		mount_fs "$fs"
	done

	# ------------
	# Debugging information
	if [ -n "${ZFS_DEBUG}" ]
	then
		#exec 2>&1-

		echo "DEBUG: imported pools:"
		"${ZPOOL}" list -H
		echo

		echo "DEBUG: mounted ZFS filesystems:"
		mount | grep zfs
		echo

		echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
		echo -n "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
		read b

		[ "$b" = "c" ] && /bin/sh
		[ "$b" = "r" ] && reboot -f

		set +x
	fi

	# ------------
	# Run local bottom script
	if type run_scripts > /dev/null 2>&1 && \
	    [ -f "/scripts/local-bottom" -o -d "/scripts/local-bottom" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Running /scripts/local-bottom"
		run_scripts /scripts/local-bottom
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi
}