diff --git a/cmd/zed/zed.d/history_event-zfs-list-cacher.sh.in b/cmd/zed/zed.d/history_event-zfs-list-cacher.sh.in index 348c8d67a..c1513cf3a 100755 --- a/cmd/zed/zed.d/history_event-zfs-list-cacher.sh.in +++ b/cmd/zed/zed.d/history_event-zfs-list-cacher.sh.in @@ -44,9 +44,10 @@ case "${ZEVENT_HISTORY_INTERNAL_NAME}" in ;; set|inherit) - # Only act if the mountpoint or canmount setting is altered. - case "${ZEVENT_HISTORY_INTERNAL_STR}" in - canmount=*|mountpoint=*) ;; + # Only act if one of the tracked properties is altered. + case "${ZEVENT_HISTORY_INTERNAL_STR%%=*}" in + canmount|mountpoint|atime|relatime|devices|exec| \ + readonly|setuid|nbmand) ;; *) exit 0 ;; esac ;; @@ -60,8 +61,10 @@ esac zed_lock zfs-list trap abort_alter EXIT -"${ZFS}" list -H -tfilesystem -oname,mountpoint,canmount -r "${ZEVENT_POOL}" \ - >"${FSLIST_TMP}" +PROPS="name,mountpoint,canmount,atime,relatime,devices,exec,readonly" +PROPS="${PROPS},setuid,nbmand" + +"${ZFS}" list -H -t filesystem -o $PROPS -r "${ZEVENT_POOL}" > "${FSLIST_TMP}" # Sort the output so that it is stable sort "${FSLIST_TMP}" -o "${FSLIST_TMP}" diff --git a/etc/systemd/system-generators/zfs-mount-generator.in b/etc/systemd/system-generators/zfs-mount-generator.in index e39a03036..08d0e1a55 100755 --- a/etc/systemd/system-generators/zfs-mount-generator.in +++ b/etc/systemd/system-generators/zfs-mount-generator.in @@ -49,38 +49,123 @@ req_dir="${dest_norm}/local-fs.target.wants/" mkdir -p "${req_dir}" # All needed information about each ZFS is available from -# zfs list -H -t filesystem -oname,mountpoint,canmount +# zfs list -H -t filesystem -o # cached in $FSLIST, and each line is processed by the following function: +# See the list below for the properties and their order process_line() { + # -o name + dataset="${1}" + p_mountpoint="${2}" + p_canmount="${3}" + p_atime="${4}" + p_relatime="${5}" + p_devices="${6}" + p_exec="${7}" + p_readonly="${8}" + p_setuid="${9}" + p_nbmand="${10}" + # Check for canmount=off . 
- if [ "${3}" = "off" ] ; then + if [ "${p_canmount}" = "off" ] ; then return - elif [ "${3}" = "noauto" ] ; then + elif [ "${p_canmount}" = "noauto" ] ; then # Don't let a noauto marked mountpoint block an "auto" marked mountpoint return - elif [ "${3}" = "on" ] ; then + elif [ "${p_canmount}" = "on" ] ; then : # This is OK else do_fail "invalid canmount" fi # Check for legacy and blank mountpoints. - if [ "${2}" = "legacy" ] ; then + if [ "${p_mountpoint}" = "legacy" ] ; then return - elif [ "${2}" = "none" ] ; then + elif [ "${p_mountpoint}" = "none" ] ; then return - elif [ "${2%"${2#?}"}" != "/" ] ; then + elif [ "${p_mountpoint%"${p_mountpoint#?}"}" != "/" ] ; then do_fail "invalid mountpoint $*" fi # Escape the mountpoint per systemd policy. - mountfile="$(systemd-escape "${2#?}").mount" + mountfile="$(systemd-escape "${p_mountpoint#?}").mount" + + # Parse options + # see lib/libzfs/libzfs_mount.c:zfs_add_options + opts="" + + # atime + if [ "${p_atime}" = on ] ; then + # relatime + if [ "${p_relatime}" = on ] ; then + opts="${opts},atime,relatime" + elif [ "${p_relatime}" = off ] ; then + opts="${opts},atime,strictatime" + else + printf 'zfs-mount-generator.sh: (%s) invalid relatime\n' \ + "${dataset}" >/dev/kmsg + fi + elif [ "${p_atime}" = off ] ; then + opts="${opts},noatime" + else + printf 'zfs-mount-generator.sh: (%s) invalid atime\n' \ + "${dataset}" >/dev/kmsg + fi + + # devices + if [ "${p_devices}" = on ] ; then + opts="${opts},dev" + elif [ "${p_devices}" = off ] ; then + opts="${opts},nodev" + else + printf 'zfs-mount-generator.sh: (%s) invalid devices\n' \ + "${dataset}" >/dev/kmsg + fi + + # exec + if [ "${p_exec}" = on ] ; then + opts="${opts},exec" + elif [ "${p_exec}" = off ] ; then + opts="${opts},noexec" + else + printf 'zfs-mount-generator.sh: (%s) invalid exec\n' \ + "${dataset}" >/dev/kmsg + fi + + # readonly + if [ "${p_readonly}" = on ] ; then + opts="${opts},ro" + elif [ "${p_readonly}" = off ] ; then + opts="${opts},rw" + else + 
printf 'zfs-mount-generator.sh: (%s) invalid readonly\n' \ + "${dataset}" >/dev/kmsg + fi + + # setuid + if [ "${p_setuid}" = on ] ; then + opts="${opts},suid" + elif [ "${p_setuid}" = off ] ; then + opts="${opts},nosuid" + else + printf 'zfs-mount-generator.sh: (%s) invalid setuid\n' \ + "${dataset}" >/dev/kmsg + fi + + # nbmand + if [ "${p_nbmand}" = on ] ; then + opts="${opts},mand" + elif [ "${p_nbmand}" = off ] ; then + opts="${opts},nomand" + else + printf 'zfs-mount-generator.sh: (%s) invalid nbmand\n' \ + "${dataset}" >/dev/kmsg + fi # If the mountpoint has already been created, give it precedence. if [ -e "${dest_norm}/${mountfile}" ] ; then - printf 'zfs-mount-generator.sh: %s.mount already exists\n' "${2}" \ + printf 'zfs-mount-generator.sh: %s already exists\n' "${mountfile}" \ >/dev/kmsg return fi @@ -97,14 +182,14 @@ After=zfs-import.target Wants=zfs-import.target [Mount] -Where=${2} -What=${1} +Where=${p_mountpoint} +What=${dataset} Type=zfs -Options=zfsutil,auto +Options=defaults${opts},zfsutil EOF - # Finally, create the appropriate dependencies based on the ZFS properties. 
- [ "$3" = "on" ] & ln -s "../${mountfile}" "${req_dir}" + # Finally, create the appropriate dependency + ln -s "../${mountfile}" "${req_dir}" } # Feed each line into process_line diff --git a/man/man8/Makefile.am b/man/man8/Makefile.am index b6408ddf2..153cd518f 100644 --- a/man/man8/Makefile.am +++ b/man/man8/Makefile.am @@ -4,7 +4,6 @@ dist_man_MANS = \ vdev_id.8 \ zdb.8 \ zfs.8 \ - zfs-mount-generator.8 \ zfs-program.8 \ zgenhostid.8 \ zinject.8 \ @@ -12,24 +11,18 @@ dist_man_MANS = \ zstreamdump.8 nodist_man_MANS = \ - zed.8 + zed.8 \ + zfs-mount-generator.8 EXTRA_DIST = \ - zed.8.in + zed.8.in \ + zfs-mount-generator.8.in -zed.8: $(srcdir)/zed.8.in - -do_subst = $(SED) \ - -e 's|@libexecdir[@]|$(libexecdir)|g' \ - -e 's|@runstatedir[@]|$(runstatedir)|g' \ - -e 's|@sysconfdir[@]|$(sysconfdir)|g' - -$(nodist_man_MANS): Makefile - $(RM) $@ $@.tmp - srcdir=''; \ - test -f ./$@.in || srcdir=$(srcdir)/; \ - $(do_subst) $${srcdir}$@.in >$@.tmp - mv $@.tmp $@ +$(nodist_man_MANS): %: %.in + -$(SED) -e 's,@libexecdir\@,$(libexecdir),g' \ + -e 's,@runstatedir\@,$(runstatedir),g' \ + -e 's,@sysconfdir\@,$(sysconfdir),g' \ + $< >'$@' install-data-local: $(INSTALL) -d -m 0755 "$(DESTDIR)$(mandir)/man8" diff --git a/man/man8/zfs-mount-generator.8 b/man/man8/zfs-mount-generator.8.in similarity index 61% rename from man/man8/zfs-mount-generator.8 rename to man/man8/zfs-mount-generator.8.in index af471e7c9..319ac8e57 100644 --- a/man/man8/zfs-mount-generator.8 +++ b/man/man8/zfs-mount-generator.8.in @@ -1,11 +1,11 @@ .TH "ZFS\-MOUNT\-GENERATOR" "8" "ZFS" "zfs-mount-generator" "\"" .SH "NAME" -zfs\-mount\-generator \- generates systemd mount units for zfs +zfs\-mount\-generator \- generates systemd mount units for ZFS .SH SYNOPSIS .B /lib/systemd/system-generators/zfs\-mount\-generator .sp .SH DESCRIPTION -The zfs\-mount\-generator implements the \fBGenerators Specification\fP +zfs\-mount\-generator implements the \fBGenerators Specification\fP of .BR systemd (1), and is called 
during early boot to generate @@ -26,7 +26,7 @@ information on ZFS mountpoints must be stored separately. The output of the command .PP .RS 4 -zfs list -H -oname,mountpoint,canmount +zfs list -H -o name,mountpoint,canmount,atime,relatime,devices,exec,readonly,setuid,nbmand .RE .PP for datasets that should be mounted by systemd, should be kept @@ -45,6 +45,33 @@ history_event-zfs-list-cacher.sh . .RE .PP .sp +.SH EXAMPLE +To begin, enable tracking for the pool: +.PP +.RS 4 +touch +.RI @sysconfdir@/zfs/zfs-list.cache/ POOLNAME +.RE +.PP +Then, enable the tracking ZEDLET: +.PP +.RS 4 +ln -s "@libexecdir@/zfs/zed.d/history_event-zfs-list-cacher.sh" "@sysconfdir@/zfs/zed.d/" + +systemctl enable zed.service + +systemctl restart zed.service +.RE +.PP +Force the running of the ZEDLET by setting canmount=on for at least one dataset in the pool: +.PP +.RS 4 +zfs set canmount=on +.I DATASET +.RE +.PP +This forces an update to the stale cache file. +.sp .SH SEE ALSO .BR zfs (5) .BR zfs-events (5)