Mirror of https://git.proxmox.com/git/mirror_zfs.git, synced 2025-08-06 15:07:39 +03:00
Compare commits
150 Commits
SHA1:
bc06d8164b, 76745cf5b8, 0c88ae6187, 307fd0da1f, 9f1c5e0b10, 5ba50c8135, 25565403aa, 47b7dc976b, 125731436d, 4425a7bb85,
e47b033eae, cfec8f13a2, 997db7a7fc, e411081aa0, a55b6fe94a, 939e9f0b6a, 679b164cd3, c2d9494f99, b952e061df, 0fea7fc109,
0f6d955a35, c3d2412b05, 74064cb175, 30b97ce218, 9519e7ebcc, f9b02fe7e3, cb8da70329, c944c46a98, 166a7bc602, e90124a7c8,
18b3bea861, d67eb17e27, 1862c1c0a8, b57f53036d, 4b8bf3c48a, 696943533c, 2284a61129, e1833a72f9, 5bb034f533, 1e08e49a28,
6604fe9a06, 7cbe7bbbd4, 2dcc8fe035, 38875918d8, 0d51852ec7, 73a73cba71, 87947f2440, 0e87150b6c, 7742e29387, f54052a122,
d874f27776, 84d7d53e91, d90042dedb, 0e46085ee6, 3b0c1131ef, 5988de77b0, 00debc1361, af10714e42, 747781a345, b17ea73f9d,
17cdb7a2b1, b673bcba4d, d8886275df, 1aee375947, a1907b038a, d359f7f547, 90603601b4, ecd0b1528e, 3ed1d608a8, c165daa0b1,
1a5414ba2f, 409aad3f33, 1917c26944, 2b64d41be8, 9753feaa63, 3c0b8da206, d7abeef621, 7fb7eb9a63, 3f9af023f6, f7675ae30f,
920603990a, 9a4b2f08d3, 8023d9d4b5, 23f063d2e6, 18474efeec, 260065099e, 4c9f2cec46, ee3677d321, 0274a9a57d, 025f8b2e74,
37e8f3ae17, 7313c6e382, 1c6b0302ef, 7e3af4658b, 1a54b13aaf, 9061a4da0b, e12d76176d, 8131793d6f, c82eb27b22, 661bb434e6,
ae48c2f6a9, b96845b632, 55cbd1f9bd, 880b73956b, d367ef2995, 7546fbd6e9, 903d3f9187, 86b5853cfb, 19a8dd48e1, 8ac70aade7,
bbc0d34bfd, f3823a9ab2, fd2cae969f, f7b4bca66a, 7e3ce4efaa, 77d81974b6, 5237760b17, ede715d1e4, e30c69365d, 78d39d91fa,
2b359c7824, ace2e17a9b, b4cd10ce5b, f52d7aaaac, d5db840260, bcd61d9579, 36a67b50a2, 3d9129a7b6, 0409c47fe0, b5a3825244,
77df762a1b, 56871e465a, c645b07eaa, 5bc27acf51, 7f830d783b, 58162960a1, 666903610d, 26ecd8b993, 774dcba86d, 09f6b2ebe3,
2609d93b65, 10f46d2aba, 0df10dc911, 0fbe9d352c, 84f44ec07f, fc9608e2e6, d32c05949a, 1ebb6b866f, f019b445f3, 03822a61be
.cirrus.yml (10 lines changed)
@@ -5,16 +5,16 @@ env:
build_task:
matrix:
freebsd_instance:
image_family: freebsd-13-5
image_family: freebsd-12-4
freebsd_instance:
image_family: freebsd-14-2
image_family: freebsd-13-2
freebsd_instance:
image_family: freebsd-15-0-snap
image_family: freebsd-14-0-snap
prepare_script:
- pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 py311-packaging py311-cffi py311-sysctl
- pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 py39-packaging py39-cffi py39-sysctl
configure_script:
- env MAKE=gmake ./autogen.sh
- env MAKE=gmake ./configure --with-config="user" --with-python=3.11
- env MAKE=gmake ./configure --with-config="user" --with-python=3.9
build_script:
- gmake -j `sysctl -n kern.smp.cpus`
install_script:
.github/PULL_REQUEST_TEMPLATE.md (vendored, 1 line changed)
@@ -27,7 +27,6 @@ https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Performance enhancement (non-breaking change which improves efficiency)
- [ ] Code cleanup (non-breaking change which makes code smaller or more readable)
- [ ] Quality assurance (non-breaking change which makes the code more robust against bugs)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)
- [ ] Library ABI change (libzfs, libzfs\_core, libnvpair, libuutil and libzfsbootenv)
- [ ] Documentation (a change to man pages or other documentation)
.github/workflows/checkstyle.yaml (vendored, 2 lines changed)
@@ -19,7 +19,7 @@ jobs:
run: |
# for x in lxd core20 snapd; do sudo snap remove $x; done
sudo apt-get purge -y snapd google-chrome-stable firefox
ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu22
ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps.sh ubuntu22
sudo apt-get install -y cppcheck devscripts mandoc pax-utils shellcheck
sudo python -m pipx install --quiet flake8
# confirm that the tools are installed
.github/workflows/labels.yml (vendored, 49 lines changed)
@@ -1,49 +0,0 @@
name: labels

on:
pull_request_target:
types: [ opened, synchronize, reopened, converted_to_draft, ready_for_review ]

permissions:
pull-requests: write

jobs:
open:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'opened' && github.event.pull_request.draft }}
steps:
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ISSUE: ${{ github.event.pull_request.html_url }}
run: |
gh pr edit $ISSUE --add-label "Status: Work in Progress"

push:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'synchronize' || github.event.action == 'reopened' }}
steps:
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ISSUE: ${{ github.event.pull_request.html_url }}
run: |
gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale"

draft:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'converted_to_draft' }}
steps:
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ISSUE: ${{ github.event.pull_request.html_url }}
run: |
gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Code Review Needed,Status: Inactive,Status: Revision Needed,Status: Stale" --add-label "Status: Work in Progress"

rfr:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'ready_for_review' }}
steps:
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ISSUE: ${{ github.event.pull_request.html_url }}
run: |
gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale,Status: Work in Progress" --add-label "Status: Code Review Needed"
@@ -29,7 +29,6 @@ FULL_RUN_IGNORE_REGEX = list(map(re.compile, [
Patterns of files that are considered to trigger full CI.
"""
FULL_RUN_REGEX = list(map(re.compile, [
r'\.github/workflows/scripts/.*',
r'cmd.*',
r'configs/.*',
r'META',
.github/workflows/scripts/qemu-1-setup.sh (vendored, 44 lines changed)
@@ -10,12 +10,36 @@ set -eu
export DEBIAN_FRONTEND="noninteractive"
sudo apt-get -y update
sudo apt-get install -y axel cloud-image-utils daemonize guestfs-tools \
virt-manager linux-modules-extra-$(uname -r) zfsutils-linux
ksmtuned virt-manager linux-modules-extra-$(uname -r) zfsutils-linux

# generate ssh keys
rm -f ~/.ssh/id_ed25519
ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -q -N ""

# we expect RAM shortage
cat << EOF | sudo tee /etc/ksmtuned.conf > /dev/null
# /etc/ksmtuned.conf - Configuration file for ksmtuned
# https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/virtualization_tuning_and_optimization_guide/chap-ksm
KSM_MONITOR_INTERVAL=60

# Millisecond sleep between ksm scans for 16Gb server.
# Smaller servers sleep more, bigger sleep less.
KSM_SLEEP_MSEC=30

KSM_NPAGES_BOOST=0
KSM_NPAGES_DECAY=0
KSM_NPAGES_MIN=1000
KSM_NPAGES_MAX=25000

KSM_THRES_COEF=80
KSM_THRES_CONST=8192

LOGFILE=/var/log/ksmtuned.log
DEBUG=1
EOF
sudo systemctl restart ksm
sudo systemctl restart ksmtuned

# not needed
sudo systemctl stop docker.socket
sudo systemctl stop multipathd.socket
@@ -41,14 +65,16 @@ $DISK
sync
sleep 1

# swap with same size as RAM (16GiB)
# swap with same size as RAM
sudo mkswap $DISK-part1
sudo swapon $DISK-part1

# JBOD 2xdisk for OpenZFS storage (test vm's)
# 60GB data disk
SSD1="$DISK-part2"
sudo fallocate -l 12G /test.ssd2
SSD2=$(sudo losetup -b 4096 -f /test.ssd2 --show)

# 10GB data disk on ext4
sudo fallocate -l 10G /test.ssd1
SSD2=$(sudo losetup -b 4096 -f /test.ssd1 --show)

# adjust zfs module parameter and create pool
exec 1>/dev/null
@@ -57,11 +83,11 @@ ARC_MAX=$((1024*1024*512))
echo $ARC_MIN | sudo tee /sys/module/zfs/parameters/zfs_arc_min
echo $ARC_MAX | sudo tee /sys/module/zfs/parameters/zfs_arc_max
echo 1 | sudo tee /sys/module/zfs/parameters/zvol_use_blk_mq
sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 -O relatime=off \
-O atime=off -O xattr=sa -O compression=lz4 -O sync=disabled \
-O redundant_metadata=none -O mountpoint=/mnt/tests
sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 \
-O relatime=off -O atime=off -O xattr=sa -O compression=lz4 \
-O mountpoint=/mnt/tests

# no need for some scheduler
for i in /sys/block/s*/queue/scheduler; do
echo "none" | sudo tee $i
echo "none" | sudo tee $i > /dev/null
done
.github/workflows/scripts/qemu-2-start.sh (vendored, 189 lines changed)
@ -12,15 +12,15 @@ OS="$1"
|
||||
# OS variant (virt-install --os-variant list)
|
||||
OSv=$OS
|
||||
|
||||
# FreeBSD urls's
|
||||
FREEBSD_REL="https://download.freebsd.org/releases/CI-IMAGES"
|
||||
FREEBSD_SNAP="https://download.freebsd.org/snapshots/CI-IMAGES"
|
||||
URLxz=""
|
||||
# compressed with .zst extension
|
||||
REPO="https://github.com/mcmilk/openzfs-freebsd-images"
|
||||
FREEBSD="$REPO/releases/download/v2024-12-14"
|
||||
URLzs=""
|
||||
|
||||
# Ubuntu mirrors
|
||||
UBMIRROR="https://cloud-images.ubuntu.com"
|
||||
#UBMIRROR="https://cloud-images.ubuntu.com"
|
||||
#UBMIRROR="https://mirrors.cloud.tencent.com/ubuntu-cloud-images"
|
||||
#UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images"
|
||||
UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images"
|
||||
|
||||
# default nic model for vm's
|
||||
NIC="virtio"
|
||||
@ -34,14 +34,11 @@ case "$OS" in
|
||||
OSNAME="AlmaLinux 9"
|
||||
URL="https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-GenericCloud-latest.x86_64.qcow2"
|
||||
;;
|
||||
almalinux10)
|
||||
OSNAME="AlmaLinux 10"
|
||||
OSv="almalinux9"
|
||||
URL="https://repo.almalinux.org/almalinux/10/cloud/x86_64/images/AlmaLinux-10-GenericCloud-latest.x86_64.qcow2"
|
||||
;;
|
||||
archlinux)
|
||||
OSNAME="Archlinux"
|
||||
URL="https://geo.mirror.pkgbuild.com/images/latest/Arch-Linux-x86_64-cloudimg.qcow2"
|
||||
# dns sometimes fails with that url :/
|
||||
echo "89.187.191.12 geo.mirror.pkgbuild.com" | sudo tee /etc/hosts > /dev/null
|
||||
;;
|
||||
centos-stream10)
|
||||
OSNAME="CentOS Stream 10"
|
||||
@ -61,67 +58,60 @@ case "$OS" in
|
||||
OSNAME="Debian 12"
|
||||
URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
|
||||
;;
|
||||
fedora40)
|
||||
OSNAME="Fedora 40"
|
||||
OSv="fedora-unknown"
|
||||
URL="https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/x86_64/images/Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2"
|
||||
;;
|
||||
fedora41)
|
||||
OSNAME="Fedora 41"
|
||||
OSv="fedora-unknown"
|
||||
URL="https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2"
|
||||
;;
|
||||
fedora42)
|
||||
OSNAME="Fedora 42"
|
||||
OSv="fedora-unknown"
|
||||
URL="https://download.fedoraproject.org/pub/fedora/linux/releases/42/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-42-1.1.x86_64.qcow2"
|
||||
freebsd13-3r)
|
||||
OSNAME="FreeBSD 13.3-RELEASE"
|
||||
OSv="freebsd13.0"
|
||||
URLzs="$FREEBSD/amd64-freebsd-13.3-RELEASE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
NIC="rtl8139"
|
||||
;;
|
||||
freebsd13-4r)
|
||||
FreeBSD="13.4-RELEASE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
OSNAME="FreeBSD 13.4-RELEASE"
|
||||
OSv="freebsd13.0"
|
||||
URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
|
||||
KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-13.4-RELEASE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
NIC="rtl8139"
|
||||
;;
|
||||
freebsd13-5r)
|
||||
FreeBSD="13.5-RELEASE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
OSv="freebsd13.0"
|
||||
URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
|
||||
KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
|
||||
NIC="rtl8139"
|
||||
freebsd14-1r)
|
||||
OSNAME="FreeBSD 14.1-RELEASE"
|
||||
OSv="freebsd14.0"
|
||||
URLzs="$FREEBSD/amd64-freebsd-14.1-RELEASE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
;;
|
||||
freebsd14-2r)
|
||||
FreeBSD="14.2-RELEASE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
OSNAME="FreeBSD 14.2-RELEASE"
|
||||
OSv="freebsd14.0"
|
||||
KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
|
||||
URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-14.2-RELEASE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
;;
|
||||
freebsd14-3r)
|
||||
FreeBSD="14.3-RELEASE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
OSv="freebsd14.0"
|
||||
URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
|
||||
KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
|
||||
;;
|
||||
freebsd13-5s)
|
||||
FreeBSD="13.5-STABLE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
freebsd13-4s)
|
||||
OSNAME="FreeBSD 13.4-STABLE"
|
||||
OSv="freebsd13.0"
|
||||
URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
|
||||
KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-13.4-STABLE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
NIC="rtl8139"
|
||||
;;
|
||||
freebsd14-3s)
|
||||
FreeBSD="14.3-STABLE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
freebsd14-2s)
|
||||
OSNAME="FreeBSD 14.2-STABLE"
|
||||
OSv="freebsd14.0"
|
||||
URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
|
||||
KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-14.2-STABLE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
;;
|
||||
freebsd15-0c)
|
||||
FreeBSD="15.0-CURRENT"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
OSNAME="FreeBSD 15.0-CURRENT"
|
||||
OSv="freebsd14.0"
|
||||
URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
|
||||
KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-15.0-CURRENT.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
;;
|
||||
tumbleweed)
|
||||
OSNAME="openSUSE Tumbleweed"
|
||||
@ -129,6 +119,11 @@ case "$OS" in
|
||||
MIRROR="http://opensuse-mirror-gce-us.susecloud.net"
|
||||
URL="$MIRROR/tumbleweed/appliances/openSUSE-MicroOS.x86_64-OpenStack-Cloud.qcow2"
|
||||
;;
|
||||
ubuntu20)
|
||||
OSNAME="Ubuntu 20.04"
|
||||
OSv="ubuntu20.04"
|
||||
URL="$UBMIRROR/focal/current/focal-server-cloudimg-amd64.img"
|
||||
;;
|
||||
ubuntu22)
|
||||
OSNAME="Ubuntu 22.04"
|
||||
OSv="ubuntu22.04"
|
||||
@ -152,7 +147,7 @@ echo "ENV=$ENV" >> $ENV
|
||||
# result path
|
||||
echo 'RESPATH="/var/tmp/test_results"' >> $ENV
|
||||
|
||||
# FreeBSD 13 has problems with: e1000 and virtio
|
||||
# FreeBSD 13 has problems with: e1000+virtio
|
||||
echo "NIC=$NIC" >> $ENV
|
||||
|
||||
# freebsd15 -> used in zfs-qemu.yml
|
||||
@ -164,48 +159,34 @@ echo "OSv=\"$OSv\"" >> $ENV
|
||||
# FreeBSD 15 (Current) -> used for summary
|
||||
echo "OSNAME=\"$OSNAME\"" >> $ENV
|
||||
|
||||
# default vm count for testings
|
||||
VMs=2
|
||||
echo "VMs=\"$VMs\"" >> $ENV
|
||||
|
||||
# default cpu count for testing vm's
|
||||
CPU=2
|
||||
echo "CPU=\"$CPU\"" >> $ENV
|
||||
|
||||
sudo mkdir -p "/mnt/tests"
|
||||
sudo chown -R $(whoami) /mnt/tests
|
||||
|
||||
DISK="/dev/zvol/zpool/openzfs"
|
||||
sudo zfs create -ps -b 64k -V 80g zpool/openzfs
|
||||
while true; do test -b $DISK && break; sleep 1; done
|
||||
|
||||
# we are downloading via axel, curl and wget are mostly slower and
|
||||
# require more return value checking
|
||||
IMG="/mnt/tests/cloud-image"
|
||||
if [ ! -z "$URLxz" ]; then
|
||||
echo "Loading $URLxz ..."
|
||||
time axel -q -o "$IMG" "$URLxz"
|
||||
echo "Loading $KSRC ..."
|
||||
time axel -q -o ~/src.txz $KSRC
|
||||
IMG="/mnt/tests/cloudimg.qcow2"
|
||||
if [ ! -z "$URLzs" ]; then
|
||||
echo "Loading image $URLzs ..."
|
||||
time axel -q -o "$IMG.zst" "$URLzs"
|
||||
zstd -q -d --rm "$IMG.zst"
|
||||
else
|
||||
echo "Loading $URL ..."
|
||||
echo "Loading image $URL ..."
|
||||
time axel -q -o "$IMG" "$URL"
|
||||
fi
|
||||
|
||||
DISK="/dev/zvol/zpool/openzfs"
|
||||
FORMAT="raw"
|
||||
sudo zfs create -ps -b 64k -V 80g zpool/openzfs
|
||||
while true; do test -b $DISK && break; sleep 1; done
|
||||
echo "Importing VM image to zvol..."
|
||||
if [ ! -z "$URLxz" ]; then
|
||||
xzcat -T0 $IMG | sudo dd of=$DISK bs=4M
|
||||
else
|
||||
sudo qemu-img dd -f qcow2 -O raw if=$IMG of=$DISK bs=4M
|
||||
fi
|
||||
sudo qemu-img dd -f qcow2 -O raw if=$IMG of=$DISK bs=4M
|
||||
rm -f $IMG
|
||||
|
||||
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
|
||||
if [ ${OS:0:7} != "freebsd" ]; then
|
||||
cat <<EOF > /tmp/user-data
|
||||
cat <<EOF > /tmp/user-data
|
||||
#cloud-config
|
||||
|
||||
hostname: $OS
|
||||
fqdn: $OS
|
||||
|
||||
users:
|
||||
- name: root
|
||||
@ -221,19 +202,6 @@ growpart:
|
||||
devices: ['/']
|
||||
ignore_growroot_disabled: false
|
||||
EOF
|
||||
else
|
||||
cat <<EOF > /tmp/user-data
|
||||
#cloud-config
|
||||
|
||||
hostname: $OS
|
||||
|
||||
# minimized config without sudo for nuageinit of FreeBSD
|
||||
growpart:
|
||||
mode: auto
|
||||
devices: ['/']
|
||||
ignore_growroot_disabled: false
|
||||
EOF
|
||||
fi
|
||||
|
||||
sudo virsh net-update default add ip-dhcp-host \
|
||||
"<host mac='52:54:00:83:79:00' ip='192.168.122.10'/>" --live --config
|
||||
@ -249,18 +217,9 @@ sudo virt-install \
|
||||
--graphics none \
|
||||
--network bridge=virbr0,model=$NIC,mac='52:54:00:83:79:00' \
|
||||
--cloud-init user-data=/tmp/user-data \
|
||||
--disk $DISK,bus=virtio,cache=none,format=raw,driver.discard=unmap \
|
||||
--disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
|
||||
--import --noautoconsole >/dev/null
|
||||
|
||||
# Give the VMs hostnames so we don't have to refer to them with
|
||||
# hardcoded IP addresses.
|
||||
#
|
||||
# vm0: Initial VM we install dependencies and build ZFS on.
|
||||
# vm1..2 Testing VMs
|
||||
for ((i=0; i<=VMs; i++)); do
|
||||
echo "192.168.122.1$i vm$i" | sudo tee -a /etc/hosts
|
||||
done
|
||||
|
||||
# in case the directory isn't there already
|
||||
mkdir -p $HOME/.ssh
|
||||
|
||||
@ -271,29 +230,3 @@ StrictHostKeyChecking no
|
||||
# small timeout, used in while loops later
|
||||
ConnectTimeout 1
|
||||
EOF
|
||||
|
||||
if [ ${OS:0:7} != "freebsd" ]; then
|
||||
# enable KSM on Linux
|
||||
sudo virsh dommemstat --domain "openzfs" --period 5
|
||||
sudo virsh node-memory-tune 100 50 1
|
||||
echo 1 | sudo tee /sys/kernel/mm/ksm/run > /dev/null
|
||||
else
|
||||
# on FreeBSD we need some more init stuff, because of nuageinit
|
||||
BASH="/usr/local/bin/bash"
|
||||
while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
|
||||
ssh 2>/dev/null root@vm0 "uname -a" && break
|
||||
done
|
||||
ssh root@vm0 "pkg install -y bash ca_root_nss git qemu-guest-agent python3 py311-cloud-init"
|
||||
ssh root@vm0 "chsh -s $BASH root"
|
||||
ssh root@vm0 'sysrc qemu_guest_agent_enable="YES"'
|
||||
ssh root@vm0 'sysrc cloudinit_enable="YES"'
|
||||
ssh root@vm0 "pw add user zfs -w no -s $BASH"
|
||||
ssh root@vm0 'mkdir -p ~zfs/.ssh'
|
||||
ssh root@vm0 'echo "zfs ALL=(ALL:ALL) NOPASSWD: ALL" >> /usr/local/etc/sudoers'
|
||||
ssh root@vm0 'echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config'
|
||||
scp ~/.ssh/id_ed25519.pub "root@vm0:~zfs/.ssh/authorized_keys"
|
||||
ssh root@vm0 'chown -R zfs ~zfs'
|
||||
ssh root@vm0 'service sshd restart'
|
||||
scp ~/src.txz "root@vm0:/tmp/src.txz"
|
||||
ssh root@vm0 'tar -C / -zxf /tmp/src.txz'
|
||||
fi
|
||||
|
.github/workflows/scripts/qemu-3-deps-vm.sh (vendored, 260 lines changed)
@ -1,260 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 3) install dependencies for compiling and loading
|
||||
#
|
||||
# $1: OS name (like 'fedora41')
|
||||
# $2: (optional) Experimental Fedora kernel version, like "6.14" to
|
||||
# install instead of Fedora defaults.
|
||||
######################################################################
|
||||
|
||||
set -eu
|
||||
|
||||
function archlinux() {
|
||||
echo "##[group]Running pacman -Syu"
|
||||
sudo btrfs filesystem resize max /
|
||||
sudo pacman -Syu --noconfirm
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
|
||||
fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
|
||||
parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
|
||||
samba sysstat rng-tools rsync wget xxhash
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function debian() {
|
||||
export DEBIAN_FRONTEND="noninteractive"
|
||||
|
||||
echo "##[group]Running apt-get update+upgrade"
|
||||
sudo apt-get update -y
|
||||
sudo apt-get upgrade -y
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo apt-get install -y \
|
||||
acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \
|
||||
fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \
|
||||
libaio-dev libattr1-dev libblkid-dev libcurl4-openssl-dev libdevmapper-dev \
|
||||
libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \
|
||||
libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \
|
||||
lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
|
||||
python3-cffi python3-dev python3-distlib python3-packaging \
|
||||
python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
|
||||
rsync samba sysstat uuid-dev watchdog wget xfslibs-dev xxhash zlib1g-dev
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function freebsd() {
|
||||
export ASSUME_ALWAYS_YES="YES"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \
|
||||
gdb gettext gettext-runtime git gmake gsed jq ksh93 lcov libtool lscpu \
|
||||
pkgconf python python3 pamtester pamtester qemu-guest-agent rsync xxhash
|
||||
sudo pkg install -xy \
|
||||
'^samba4[[:digit:]]+$' \
|
||||
'^py3[[:digit:]]+-cffi$' \
|
||||
'^py3[[:digit:]]+-sysctl$' \
|
||||
'^py3[[:digit:]]+-setuptools$' \
|
||||
'^py3[[:digit:]]+-packaging$'
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
# common packages for: almalinux, centos, redhat
|
||||
function rhel() {
|
||||
echo "##[group]Running dnf update"
|
||||
echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf
|
||||
sudo dnf clean all
|
||||
sudo dnf update -y --setopt=fastestmirror=1 --refresh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
|
||||
# Alma wants "Development Tools", Fedora 41 wants "development-tools"
|
||||
if ! sudo dnf group install -y "Development Tools" ; then
|
||||
echo "Trying 'development-tools' instead of 'Development Tools'"
|
||||
sudo dnf group install -y development-tools
|
||||
fi
|
||||
|
||||
sudo dnf install -y \
|
||||
acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \
|
||||
gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \
|
||||
libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \
|
||||
ncompress libselinux-devel libtirpc-devel libtool libudev-devel \
|
||||
libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
|
||||
parted perf python3 python3-cffi python3-devel python3-packaging \
|
||||
kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
|
||||
rpm-build rsync samba sysstat systemd watchdog wget xfsprogs-devel xxhash \
|
||||
zlib-devel
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function tumbleweed() {
|
||||
echo "##[group]Running zypper is TODO!"
|
||||
sleep 23456
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
# $1: Kernel version to install (like '6.14rc7')
|
||||
function install_fedora_experimental_kernel {
|
||||
|
||||
our_version="$1"
|
||||
sudo dnf -y copr enable @kernel-vanilla/stable
|
||||
sudo dnf -y copr enable @kernel-vanilla/mainline
|
||||
all="$(sudo dnf list --showduplicates kernel-*)"
|
||||
echo "Available versions:"
|
||||
echo "$all"
|
||||
|
||||
# You can have a bunch of minor variants of the version we want '6.14'.
|
||||
# Pick the newest variant (sorted by version number).
|
||||
specific_version=$(echo "$all" | grep $our_version | awk '{print $2}' | sort -V | tail -n 1)
|
||||
list="$(echo "$all" | grep $specific_version | grep -Ev 'kernel-rt|kernel-selftests|kernel-debuginfo' | sed 's/.x86_64//g' | awk '{print $1"-"$2}')"
|
||||
sudo dnf install -y $list
|
||||
sudo dnf -y copr disable @kernel-vanilla/stable
|
||||
sudo dnf -y copr disable @kernel-vanilla/mainline
|
||||
}
|
||||
|
||||
# Install dependencies
|
||||
case "$1" in
|
||||
almalinux8)
|
||||
echo "##[group]Enable epel and powertools repositories"
|
||||
sudo dnf config-manager -y --set-enabled powertools
|
||||
sudo dnf install -y epel-release
|
||||
echo "##[endgroup]"
|
||||
rhel
|
||||
echo "##[group]Install kernel-abi-whitelists"
|
||||
sudo dnf install -y kernel-abi-whitelists
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
almalinux9|almalinux10|centos-stream9|centos-stream10)
|
||||
echo "##[group]Enable epel and crb repositories"
|
||||
sudo dnf config-manager -y --set-enabled crb
|
||||
sudo dnf install -y epel-release
|
||||
echo "##[endgroup]"
|
||||
rhel
|
||||
echo "##[group]Install kernel-abi-stablelists"
|
||||
sudo dnf install -y kernel-abi-stablelists
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
archlinux)
|
||||
archlinux
|
||||
;;
|
||||
debian*)
|
||||
echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
|
||||
debian
|
||||
echo "##[group]Install Debian specific"
|
||||
sudo apt-get install -yq linux-perf dh-sequence-dkms
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
fedora*)
|
||||
rhel
|
||||
sudo dnf install -y libunwind-devel
|
||||
|
||||
# Fedora 42+ moves /usr/bin/script from 'util-linux' to 'util-linux-script'
|
||||
sudo dnf install -y util-linux-script || true
|
||||
|
||||
# Optional: Install an experimental kernel ($2 = kernel version)
|
||||
if [ -n "${2:-}" ] ; then
|
||||
install_fedora_experimental_kernel "$2"
|
||||
fi
|
||||
;;
|
||||
freebsd*)
|
||||
freebsd
|
||||
;;
|
||||
tumbleweed)
|
||||
tumbleweed
|
||||
;;
|
||||
ubuntu*)
|
||||
debian
|
||||
echo "##[group]Install Ubuntu specific"
|
||||
sudo apt-get install -yq linux-tools-common libtirpc-dev \
|
||||
linux-modules-extra-$(uname -r)
|
||||
sudo apt-get install -yq dh-sequence-dkms
|
||||
echo "##[endgroup]"
|
||||
echo "##[group]Delete Ubuntu OpenZFS modules"
|
||||
for i in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
esac
|
||||
|
||||
# This script is used for checkstyle + zloop deps also.
|
||||
# Install only the needed packages and exit - when used this way.
|
||||
test -z "${ONLY_DEPS:-}" || exit 0
|
||||
|
||||
# Start services
|
||||
echo "##[group]Enable services"
|
||||
case "$1" in
|
||||
freebsd*)
|
||||
# add virtio things
|
||||
echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf
|
||||
for i in balloon blk console random scsi; do
|
||||
echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf
|
||||
done
|
||||
echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab
|
||||
sudo -E mount /dev/fd
|
||||
sudo -E touch /etc/zfs/exports
|
||||
sudo -E sysrc mountd_flags="/etc/zfs/exports"
|
||||
echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null
|
||||
sudo -E service nfsd enable
|
||||
sudo -E service qemu-guest-agent enable
|
||||
sudo -E service samba_server enable
|
||||
;;
|
||||
debian*|ubuntu*)
|
||||
sudo -E systemctl enable nfs-kernel-server
|
||||
sudo -E systemctl enable qemu-guest-agent
|
||||
sudo -E systemctl enable smbd
|
||||
;;
|
||||
*)
|
||||
# All other linux distros
|
||||
sudo -E systemctl enable nfs-server
|
||||
sudo -E systemctl enable qemu-guest-agent
|
||||
sudo -E systemctl enable smb
|
||||
;;
|
||||
esac
|
||||
echo "##[endgroup]"
|
||||
|
||||
# Setup Kernel cmdline
|
||||
CMDLINE="console=tty0 console=ttyS0,115200n8"
|
||||
CMDLINE="$CMDLINE selinux=0"
|
||||
CMDLINE="$CMDLINE random.trust_cpu=on"
|
||||
CMDLINE="$CMDLINE no_timer_check"
|
||||
case "$1" in
|
||||
almalinux*|centos*|fedora*)
|
||||
GRUB_CFG="/boot/grub2/grub.cfg"
|
||||
GRUB_MKCONFIG="grub2-mkconfig"
|
||||
CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0"
|
||||
echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
;;
|
||||
ubuntu24)
|
||||
GRUB_CFG="/boot/grub/grub.cfg"
|
||||
GRUB_MKCONFIG="grub-mkconfig"
|
||||
echo 'GRUB_DISABLE_OS_PROBER="false"' \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
;;
|
||||
*)
|
||||
GRUB_CFG="/boot/grub/grub.cfg"
|
||||
GRUB_MKCONFIG="grub-mkconfig"
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$1" in
|
||||
archlinux|freebsd*)
|
||||
true
|
||||
;;
|
||||
*)
|
||||
echo "##[group]Edit kernel cmdline"
|
||||
sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true
|
||||
echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
sudo $GRUB_MKCONFIG -o $GRUB_CFG
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
esac
|
||||
|
||||
# reset cloud-init configuration and poweroff
|
||||
sudo cloud-init clean --logs
|
||||
sleep 2 && sudo poweroff &
|
||||
exit 0
|
.github/workflows/scripts/qemu-3-deps.sh (vendored, 247 lines changed)
@ -1,28 +1,229 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 3) Wait for VM to boot from previous step and launch dependencies
|
||||
# script on it.
|
||||
#
|
||||
# $1: OS name (like 'fedora41')
|
||||
# $2: (optional) Experimental kernel version to install on fedora,
|
||||
# like "6.14".
|
||||
# 3) install dependencies for compiling and loading
|
||||
######################################################################
|
||||
|
||||
.github/workflows/scripts/qemu-wait-for-vm.sh vm0
|
||||
set -eu
|
||||
|
||||
# SPECIAL CASE:
|
||||
#
|
||||
# If the user passed in an experimental kernel version to test on Fedora,
|
||||
# we need to update the kernel version in zfs's META file to allow the
|
||||
# build to happen. We update our local copy of META here, since we know
|
||||
# it will be rsync'd up in the next step.
|
||||
if [ -n "${2:-}" ] ; then
|
||||
sed -i -E 's/Linux-Maximum: .+/Linux-Maximum: 99.99/g' META
|
||||
fi
|
||||
function archlinux() {
|
||||
echo "##[group]Running pacman -Syu"
|
||||
sudo btrfs filesystem resize max /
|
||||
sudo pacman -Syu --noconfirm
|
||||
echo "##[endgroup]"
|
||||
|
||||
scp .github/workflows/scripts/qemu-3-deps-vm.sh zfs@vm0:qemu-3-deps-vm.sh
|
||||
PID=`pidof /usr/bin/qemu-system-x86_64`
|
||||
ssh zfs@vm0 '$HOME/qemu-3-deps-vm.sh' "$@"
|
||||
# wait for poweroff to succeed
|
||||
tail --pid=$PID -f /dev/null
|
||||
sleep 5 # avoid this: "error: Domain is already active"
|
||||
rm -f $HOME/.ssh/known_hosts
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
|
||||
fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
|
||||
parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
|
||||
samba sysstat rng-tools rsync wget xxhash
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function debian() {
|
||||
export DEBIAN_FRONTEND="noninteractive"
|
||||
|
||||
echo "##[group]Running apt-get update+upgrade"
|
||||
sudo apt-get update -y
|
||||
sudo apt-get upgrade -y
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo apt-get install -y \
|
||||
acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \
|
||||
fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \
|
||||
libaio-dev libattr1-dev libblkid-dev libcurl4-openssl-dev libdevmapper-dev \
|
||||
libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \
|
||||
libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \
|
||||
lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
|
||||
python3-cffi python3-dev python3-distlib python3-packaging \
|
||||
python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
|
||||
rsync samba sysstat uuid-dev watchdog wget xfslibs-dev xxhash zlib1g-dev
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function freebsd() {
|
||||
export ASSUME_ALWAYS_YES="YES"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \
|
||||
gdb gettext gettext-runtime git gmake gsed jq ksh93 lcov libtool lscpu \
|
||||
pkgconf python python3 pamtester pamtester qemu-guest-agent rsync xxhash
|
||||
sudo pkg install -xy \
|
||||
'^samba4[[:digit:]]+$' \
|
||||
'^py3[[:digit:]]+-cffi$' \
|
||||
'^py3[[:digit:]]+-sysctl$' \
|
||||
'^py3[[:digit:]]+-packaging$'
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
# common packages for: almalinux, centos, redhat
|
||||
function rhel() {
|
||||
echo "##[group]Running dnf update"
|
||||
echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf
|
||||
sudo dnf clean all
|
||||
sudo dnf update -y --setopt=fastestmirror=1 --refresh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
|
||||
# Alma wants "Development Tools", Fedora 41 wants "development-tools"
|
||||
if ! sudo dnf group install -y "Development Tools" ; then
|
||||
echo "Trying 'development-tools' instead of 'Development Tools'"
|
||||
sudo dnf group install -y development-tools
|
||||
fi
|
||||
|
||||
sudo dnf install -y \
|
||||
acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \
|
||||
gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \
|
||||
libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \
|
||||
ncompress libselinux-devel libtirpc-devel libtool libudev-devel \
|
||||
libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
|
||||
parted perf python3 python3-cffi python3-devel python3-packaging \
|
||||
kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
|
||||
rpm-build rsync samba sysstat systemd watchdog wget xfsprogs-devel xxhash \
|
||||
zlib-devel
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function tumbleweed() {
|
||||
echo "##[group]Running zypper is TODO!"
|
||||
sleep 23456
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
# Install dependencies
|
||||
case "$1" in
|
||||
almalinux8)
|
||||
echo "##[group]Enable epel and powertools repositories"
|
||||
sudo dnf config-manager -y --set-enabled powertools
|
||||
sudo dnf install -y epel-release
|
||||
echo "##[endgroup]"
|
||||
rhel
|
||||
echo "##[group]Install kernel-abi-whitelists"
|
||||
sudo dnf install -y kernel-abi-whitelists
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
almalinux9|centos-stream9|centos-stream10)
|
||||
echo "##[group]Enable epel and crb repositories"
|
||||
sudo dnf config-manager -y --set-enabled crb
|
||||
sudo dnf install -y epel-release
|
||||
echo "##[endgroup]"
|
||||
rhel
|
||||
echo "##[group]Install kernel-abi-stablelists"
|
||||
sudo dnf install -y kernel-abi-stablelists
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
archlinux)
|
||||
archlinux
|
||||
;;
|
||||
debian*)
|
||||
echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
|
||||
debian
|
||||
echo "##[group]Install Debian specific"
|
||||
sudo apt-get install -yq linux-perf dh-sequence-dkms
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
fedora*)
|
||||
rhel
|
||||
;;
|
||||
freebsd*)
|
||||
freebsd
|
||||
;;
|
||||
tumbleweed)
|
||||
tumbleweed
|
||||
;;
|
||||
ubuntu*)
|
||||
debian
|
||||
echo "##[group]Install Ubuntu specific"
|
||||
sudo apt-get install -yq linux-tools-common libtirpc-dev \
|
||||
linux-modules-extra-$(uname -r)
|
||||
if [ "$1" != "ubuntu20" ]; then
|
||||
sudo apt-get install -yq dh-sequence-dkms
|
||||
fi
|
||||
echo "##[endgroup]"
|
||||
echo "##[group]Delete Ubuntu OpenZFS modules"
|
||||
for i in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
esac
|
||||
|
||||
# This script is used for checkstyle + zloop deps also.
|
||||
# Install only the needed packages and exit - when used this way.
|
||||
test -z "${ONLY_DEPS:-}" || exit 0
|
||||
|
||||
# Start services
|
||||
echo "##[group]Enable services"
|
||||
case "$1" in
|
||||
freebsd*)
|
||||
# add virtio things
|
||||
echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf
|
||||
for i in balloon blk console random scsi; do
|
||||
echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf
|
||||
done
|
||||
echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab
|
||||
sudo -E mount /dev/fd
|
||||
sudo -E touch /etc/zfs/exports
|
||||
sudo -E sysrc mountd_flags="/etc/zfs/exports"
|
||||
echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null
|
||||
sudo -E service nfsd enable
|
||||
sudo -E service qemu-guest-agent enable
|
||||
sudo -E service samba_server enable
|
||||
;;
|
||||
debian*|ubuntu*)
|
||||
sudo -E systemctl enable nfs-kernel-server
|
||||
sudo -E systemctl enable qemu-guest-agent
|
||||
sudo -E systemctl enable smbd
|
||||
;;
|
||||
*)
|
||||
# All other linux distros
|
||||
sudo -E systemctl enable nfs-server
|
||||
sudo -E systemctl enable qemu-guest-agent
|
||||
sudo -E systemctl enable smb
|
||||
;;
|
||||
esac
|
||||
echo "##[endgroup]"
|
||||
|
||||
# Setup Kernel cmdline
|
||||
CMDLINE="console=tty0 console=ttyS0,115200n8"
|
||||
CMDLINE="$CMDLINE selinux=0"
|
||||
CMDLINE="$CMDLINE random.trust_cpu=on"
|
||||
CMDLINE="$CMDLINE no_timer_check"
|
||||
case "$1" in
|
||||
almalinux*|centos*|fedora*)
|
||||
GRUB_CFG="/boot/grub2/grub.cfg"
|
||||
GRUB_MKCONFIG="grub2-mkconfig"
|
||||
CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0"
|
||||
echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
;;
|
||||
ubuntu24)
|
||||
GRUB_CFG="/boot/grub/grub.cfg"
|
||||
GRUB_MKCONFIG="grub-mkconfig"
|
||||
echo 'GRUB_DISABLE_OS_PROBER="false"' \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
;;
|
||||
*)
|
||||
GRUB_CFG="/boot/grub/grub.cfg"
|
||||
GRUB_MKCONFIG="grub-mkconfig"
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$1" in
|
||||
archlinux|freebsd*)
|
||||
true
|
||||
;;
|
||||
*)
|
||||
echo "##[group]Edit kernel cmdline"
|
||||
sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true
|
||||
echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
sudo $GRUB_MKCONFIG -o $GRUB_CFG
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
esac
|
||||
|
||||
# reset cloud-init configuration and poweroff
|
||||
sudo cloud-init clean --logs
|
||||
sleep 2 && sudo poweroff &
|
||||
exit 0
|
||||
|
.github/workflows/scripts/qemu-4-build-vm.sh (vendored, 379 lines changed)
@ -1,379 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 4) configure and build openzfs modules. This is run on the VMs.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# qemu-4-build-vm.sh OS [--enable-debug][--dkms][--poweroff]
|
||||
# [--release][--repo][--tarball]
|
||||
#
|
||||
# OS: OS name like 'fedora41'
|
||||
# --enable-debug: Build RPMs with '--enable-debug' (for testing)
|
||||
# --dkms: Build DKMS RPMs as well
|
||||
# --poweroff: Power-off the VM after building
|
||||
# --release Build zfs-release*.rpm as well
|
||||
# --repo After building everything, copy RPMs into /tmp/repo
|
||||
# in the ZFS RPM repository file structure. Also
|
||||
# copy tarballs if they were built.
|
||||
# --tarball: Also build a tarball of ZFS source
|
||||
######################################################################
|
||||
|
||||
ENABLE_DEBUG=""
|
||||
DKMS=""
|
||||
POWEROFF=""
|
||||
RELEASE=""
|
||||
REPO=""
|
||||
TARBALL=""
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--enable-debug)
|
||||
ENABLE_DEBUG=1
|
||||
shift
|
||||
;;
|
||||
--dkms)
|
||||
DKMS=1
|
||||
shift
|
||||
;;
|
||||
--poweroff)
|
||||
POWEROFF=1
|
||||
shift
|
||||
;;
|
||||
--release)
|
||||
RELEASE=1
|
||||
shift
|
||||
;;
|
||||
--repo)
|
||||
REPO=1
|
||||
shift
|
||||
;;
|
||||
--tarball)
|
||||
TARBALL=1
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
OS=$1
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
set -eu
|
||||
|
||||
function run() {
|
||||
LOG="/var/tmp/build-stderr.txt"
|
||||
echo "****************************************************"
|
||||
echo "$(date) ($*)"
|
||||
echo "****************************************************"
|
||||
($@ || echo $? > /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG
|
||||
if [ -f /tmp/rv ]; then
|
||||
RV=$(cat /tmp/rv)
|
||||
echo "****************************************************"
|
||||
echo "exit with value=$RV ($*)"
|
||||
echo "****************************************************"
|
||||
echo 1 > /var/tmp/build-exitcode.txt
|
||||
exit $RV
|
||||
fi
|
||||
}
|
||||
|
||||
# Look at the RPMs in the current directory and copy/move them to
|
||||
# /tmp/repo, using the directory structure we use for the ZFS RPM repos.
|
||||
#
|
||||
# For example:
|
||||
# /tmp/repo/epel-testing/9.5
|
||||
# /tmp/repo/epel-testing/9.5/SRPMS
|
||||
# /tmp/repo/epel-testing/9.5/SRPMS/zfs-2.3.99-1.el9.src.rpm
|
||||
# /tmp/repo/epel-testing/9.5/SRPMS/zfs-kmod-2.3.99-1.el9.src.rpm
|
||||
# /tmp/repo/epel-testing/9.5/kmod
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/kmod-zfs-debuginfo-2.3.99-1.el9.x86_64.rpm
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libnvpair3-debuginfo-2.3.99-1.el9.x86_64.rpm
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libuutil3-debuginfo-2.3.99-1.el9.x86_64.rpm
|
||||
# ...
|
||||
function copy_rpms_to_repo {
|
||||
# Pick a RPM to query. It doesn't matter which one - we just want to extract
|
||||
# the 'Build Host' value from it.
|
||||
rpm=$(ls zfs-*.rpm | head -n 1)
|
||||
|
||||
# Get zfs version '2.2.99'
|
||||
zfs_ver=$(rpm -qpi $rpm | awk '/Version/{print $3}')
|
||||
|
||||
# Get "2.1" or "2.2"
|
||||
zfs_major=$(echo $zfs_ver | grep -Eo [0-9]+\.[0-9]+)
|
||||
|
||||
# Get 'almalinux9.5' or 'fedora41' type string
|
||||
build_host=$(rpm -qpi $rpm | awk '/Build Host/{print $4}')
|
||||
|
||||
# Get '9.5' or '41' OS version
|
||||
os_ver=$(echo $build_host | grep -Eo '[0-9\.]+$')
|
||||
|
||||
# Our ZFS version and OS name will determine which repo the RPMs
|
||||
# will go in (regular or testing). Fedora always gets the newest
|
||||
# releases, and Alma gets the older releases.
|
||||
case $build_host in
|
||||
almalinux*)
|
||||
case $zfs_major in
|
||||
2.2)
|
||||
d="epel"
|
||||
;;
|
||||
*)
|
||||
d="epel-testing"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
fedora*)
|
||||
d="fedora"
|
||||
;;
|
||||
esac
|
||||
|
||||
prefix=/tmp/repo
|
||||
dst="$prefix/$d/$os_ver"
|
||||
|
||||
# Special case: move zfs-release*.rpm out of the way first (if we built them).
|
||||
# This will make filtering the other RPMs easier.
|
||||
mkdir -p $dst
|
||||
mv zfs-release*.rpm $dst || true
|
||||
|
||||
# Copy source RPMs
|
||||
mkdir -p $dst/SRPMS
|
||||
cp $(ls *.src.rpm) $dst/SRPMS/
|
||||
|
||||
if [[ "$build_host" =~ "almalinux" ]] ; then
|
||||
# Copy kmods+userspace
|
||||
mkdir -p $dst/kmod/x86_64/debug
|
||||
cp $(ls *.rpm | grep -Ev 'src.rpm|dkms|debuginfo') $dst/kmod/x86_64
|
||||
cp *debuginfo*.rpm $dst/kmod/x86_64/debug
|
||||
fi
|
||||
|
||||
if [ -n "$DKMS" ] ; then
|
||||
# Copy dkms+userspace
|
||||
mkdir -p $dst/x86_64
|
||||
cp $(ls *.rpm | grep -Ev 'src.rpm|kmod|debuginfo') $dst/x86_64
|
||||
fi
|
||||
|
||||
# Copy debug
|
||||
mkdir -p $dst/x86_64/debug
|
||||
cp $(ls *debuginfo*.rpm | grep -v kmod) $dst/x86_64/debug
|
||||
}
|
||||
|
||||
function freebsd() {
|
||||
extra="${1:-}"
|
||||
|
||||
export MAKE="gmake"
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr/local \
|
||||
--with-libintl-prefix=/usr/local \
|
||||
--enable-pyzfs \
|
||||
--enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run gmake -j$(sysctl -n hw.ncpu)
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
run sudo gmake install
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function linux() {
|
||||
extra="${1:-}"
|
||||
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr \
|
||||
--enable-pyzfs \
|
||||
--enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make -j$(nproc)
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
run sudo make install
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function rpm_build_and_install() {
|
||||
extra="${1:-}"
|
||||
|
||||
# Build RPMs with XZ compression by default (since gzip decompression is slow)
|
||||
echo "%_binary_payload w7.xzdio" >> ~/.rpmmacros
|
||||
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure --enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make pkg-kmod pkg-utils
|
||||
echo "##[endgroup]"
|
||||
|
||||
if [ -n "$DKMS" ] ; then
|
||||
echo "##[group]DKMS"
|
||||
make rpm-dkms
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
|
||||
if [ -n "$REPO" ] ; then
|
||||
echo "Skipping install since we're only building RPMs and nothing else"
|
||||
else
|
||||
echo "##[group]Install"
|
||||
run sudo dnf -y --nobest install $(ls *.rpm | grep -Ev 'dkms|src.rpm')
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
|
||||
# Optionally build the zfs-release.*.rpm
|
||||
if [ -n "$RELEASE" ] ; then
|
||||
echo "##[group]Release"
|
||||
pushd ~
|
||||
sudo dnf -y install rpm-build || true
|
||||
# Check out a sparse copy of zfsonlinux.github.com.git so we don't get
|
||||
# all the binaries. We just need a few kilobytes of files to build RPMs.
|
||||
git clone --depth 1 --no-checkout \
|
||||
https://github.com/zfsonlinux/zfsonlinux.github.com.git
|
||||
|
||||
cd zfsonlinux.github.com
|
||||
git sparse-checkout set zfs-release
|
||||
git checkout
|
||||
cd zfs-release
|
||||
|
||||
mkdir -p ~/rpmbuild/{BUILDROOT,SPECS,RPMS,SRPMS,SOURCES,BUILD}
|
||||
cp RPM-GPG-KEY-openzfs* *.repo ~/rpmbuild/SOURCES
|
||||
cp zfs-release.spec ~/rpmbuild/SPECS/
|
||||
rpmbuild -ba ~/rpmbuild/SPECS/zfs-release.spec
|
||||
|
||||
# ZFS release RPMs are built. Copy them to the ~/zfs directory just to
|
||||
# keep all the RPMs in the same place.
|
||||
cp ~/rpmbuild/RPMS/noarch/*.rpm ~/zfs
|
||||
cp ~/rpmbuild/SRPMS/*.rpm ~/zfs
|
||||
|
||||
popd
|
||||
rm -fr ~/rpmbuild
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
|
||||
if [ -n "$REPO" ] ; then
|
||||
echo "##[group]Repo"
|
||||
copy_rpms_to_repo
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
}
|
||||
|
||||
function deb_build_and_install() {
|
||||
extra="${1:-}"
|
||||
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr \
|
||||
--enable-pyzfs \
|
||||
--enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make native-deb-kmod native-deb-utils
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
# Do kmod install. Note that when you build the native debs, the
|
||||
# packages themselves are placed in parent directory '../' rather than
|
||||
# in the source directory like the rpms are.
|
||||
run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \
|
||||
| grep -Ev 'dkms|dracut')
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function build_tarball {
|
||||
if [ -n "$REPO" ] ; then
|
||||
./autogen.sh
|
||||
./configure --with-config=srpm
|
||||
make dist
|
||||
mkdir -p /tmp/repo/releases
|
||||
# The tarball name is based off of 'Version' field in the META file.
|
||||
mv *.tar.gz /tmp/repo/releases/
|
||||
fi
|
||||
}
|
||||
|
||||
# Debug: show kernel cmdline
|
||||
if [ -f /proc/cmdline ] ; then
|
||||
cat /proc/cmdline || true
|
||||
fi
|
||||
|
||||
# Set our hostname to our OS name and version number. Specifically, we set the
|
||||
# major and minor number so that when we query the Build Host field in the RPMs
|
||||
# we build, we can see what specific version of Fedora/Almalinux we were using
|
||||
# to build them. This is helpful for matching up KMOD versions.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# rhel8.10
|
||||
# almalinux9.5
|
||||
# fedora42
|
||||
source /etc/os-release
|
||||
sudo hostname "$ID$VERSION_ID"
|
||||
|
||||
# save some sysinfo
|
||||
uname -a > /var/tmp/uname.txt
|
||||
|
||||
cd $HOME/zfs
|
||||
export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"
|
||||
|
||||
extra=""
|
||||
if [ -n "$ENABLE_DEBUG" ] ; then
|
||||
extra="--enable-debug"
|
||||
fi
|
||||
|
||||
# build
|
||||
case "$OS" in
|
||||
freebsd*)
|
||||
freebsd "$extra"
|
||||
;;
|
||||
alma*|centos*)
|
||||
rpm_build_and_install "--with-spec=redhat $extra"
|
||||
;;
|
||||
fedora*)
|
||||
rpm_build_and_install "$extra"
|
||||
|
||||
# Historically, we've always built the release tarballs on Fedora, since
|
||||
# there was one instance long ago where we built them on CentOS 7, and they
|
||||
# didn't work correctly for everyone.
|
||||
if [ -n "$TARBALL" ] ; then
|
||||
build_tarball
|
||||
fi
|
||||
;;
|
||||
debian*|ubuntu*)
|
||||
deb_build_and_install "$extra"
|
||||
;;
|
||||
*)
|
||||
linux "$extra"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
# building the zfs module was ok
|
||||
echo 0 > /var/tmp/build-exitcode.txt
|
||||
|
||||
# reset cloud-init configuration and poweroff
|
||||
if [ -n "$POWEROFF" ] ; then
|
||||
sudo cloud-init clean --logs
|
||||
sync && sleep 2 && sudo poweroff &
|
||||
fi
|
||||
exit 0
|
.github/workflows/scripts/qemu-4-build.sh (vendored, 150 lines changed)
@@ -3,9 +3,151 @@
######################################################################
# 4) configure and build openzfs modules
######################################################################
echo "Build modules in QEMU machine"

# Bring our VM back up and copy over ZFS source
.github/workflows/scripts/qemu-prepare-for-build.sh
set -eu

ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-4-build-vm.sh' $@
function run() {
LOG="/var/tmp/build-stderr.txt"
echo "****************************************************"
echo "$(date) ($*)"
echo "****************************************************"
($@ || echo $? > /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG
if [ -f /tmp/rv ]; then
RV=$(cat /tmp/rv)
echo "****************************************************"
echo "exit with value=$RV ($*)"
echo "****************************************************"
echo 1 > /var/tmp/build-exitcode.txt
exit $RV
fi
}

function freebsd() {
export MAKE="gmake"
echo "##[group]Autogen.sh"
run ./autogen.sh
echo "##[endgroup]"

echo "##[group]Configure"
run ./configure \
--prefix=/usr/local \
--with-libintl-prefix=/usr/local \
--enable-pyzfs \
--enable-debug \
--enable-debuginfo
echo "##[endgroup]"

echo "##[group]Build"
run gmake -j$(sysctl -n hw.ncpu)
echo "##[endgroup]"

echo "##[group]Install"
run sudo gmake install
echo "##[endgroup]"
}

function linux() {
echo "##[group]Autogen.sh"
run ./autogen.sh
echo "##[endgroup]"

echo "##[group]Configure"
run ./configure \
--prefix=/usr \
--enable-pyzfs \
--enable-debug \
--enable-debuginfo
echo "##[endgroup]"

echo "##[group]Build"
run make -j$(nproc)
echo "##[endgroup]"

echo "##[group]Install"
run sudo make install
echo "##[endgroup]"
}

function rpm_build_and_install() {
EXTRA_CONFIG="${1:-}"
echo "##[group]Autogen.sh"
run ./autogen.sh
echo "##[endgroup]"

echo "##[group]Configure"
run ./configure --enable-debug --enable-debuginfo $EXTRA_CONFIG
echo "##[endgroup]"

echo "##[group]Build"
run make pkg-kmod pkg-utils
echo "##[endgroup]"

echo "##[group]Install"
run sudo dnf -y --nobest install $(ls *.rpm | grep -v src.rpm)
echo "##[endgroup]"

}

function deb_build_and_install() {
echo "##[group]Autogen.sh"
run ./autogen.sh
echo "##[endgroup]"

echo "##[group]Configure"
run ./configure \
--prefix=/usr \
--enable-pyzfs \
--enable-debug \
--enable-debuginfo
echo "##[endgroup]"

echo "##[group]Build"
run make native-deb-kmod native-deb-utils
echo "##[endgroup]"

echo "##[group]Install"
# Do kmod install. Note that when you build the native debs, the
# packages themselves are placed in parent directory '../' rather than
# in the source directory like the rpms are.
run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \
| grep -Ev 'dkms|dracut')
echo "##[endgroup]"
}

# Debug: show kernel cmdline
if [ -f /proc/cmdline ] ; then
cat /proc/cmdline || true
fi

# save some sysinfo
uname -a > /var/tmp/uname.txt

cd $HOME/zfs
export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"

# build
case "$1" in
freebsd*)
freebsd
;;
alma*|centos*)
rpm_build_and_install "--with-spec=redhat"
;;
fedora*)
rpm_build_and_install
;;
debian*|ubuntu*)
deb_build_and_install
;;
*)
linux
;;
esac

# building the zfs module was ok
echo 0 > /var/tmp/build-exitcode.txt

# reset cloud-init configuration and poweroff
sudo cloud-init clean --logs
sync && sleep 2 && sudo poweroff &
exit 0
.github/workflows/scripts/qemu-5-setup.sh (38 lines changed, vendored)
@@ -14,33 +14,39 @@ PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
sudo virsh undefine openzfs

# default values per test vm:
VMs=2
CPU=2

# cpu pinning
CPUSET=("0,1" "2,3")

case "$OS" in
  freebsd*)
    # FreeBSD needs only 6GiB
    # FreeBSD can't be optimized via ksmtuned
    RAM=6
    ;;
  *)
    # Linux needs more memory, but can be optimized to share it via KSM
    # Linux can be optimized via ksmtuned
    RAM=8
    ;;
esac

# this can be different for each distro
echo "VMs=$VMs" >> $ENV

# create snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now

# setup the testing vm's
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
for i in $(seq 1 $VMs); do

# start testing VMs
for ((i=1; i<=VMs; i++)); do
  echo "Creating disk for vm$i..."
  DISK="/dev/zvol/zpool/vm$i"
  FORMAT="raw"
  sudo zfs clone zpool/openzfs@now zpool/vm$i-system
  sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests
  sudo zfs clone zpool/openzfs@now zpool/vm$i
  sudo zfs create -ps -b 64k -V 80g zpool/vm$i-2

  cat <<EOF > /tmp/user-data
#cloud-config
@@ -77,21 +83,23 @@ EOF
    --graphics none \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole >/dev/null
done

# generate some memory stats
# check the memory state from time to time
cat <<EOF > cronjob.sh
# $OS
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "********************************************************************************"
echo "*******************************************************"
date
uptime
free -m
df -h /mnt/tests
zfs list
EOF

sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
@@ -100,15 +108,17 @@ rm crontab.txt

# check if the machines are okay
echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
for ((i=1; i<=VMs; i++)); do
  .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
for i in $(seq 1 $VMs); do
  while true; do
    ssh 2>/dev/null zfs@192.168.122.1$i "uname -a" && break
  done
done
echo "All $VMs VMs are up now."

# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
for ((i=1; i<=VMs; i++)); do
for i in $(seq 1 $VMs); do
  mkdir -p $RESPATH/vm$i
  read "pty" <<< $(sudo virsh ttyconsole vm$i)
  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
.github/workflows/scripts/qemu-6-tests.sh (22 lines changed, vendored)
@@ -45,7 +45,7 @@ if [ -z ${1:-} ]; then
  echo 0 > /tmp/ctr
  date "+%s" > /tmp/tsstart

  for ((i=1; i<=VMs; i++)); do
  for i in $(seq 1 $VMs); do
    IP="192.168.122.1$i"
    daemonize -c /var/tmp -p vm${i}.pid -o vm${i}log.txt -- \
      $SSH zfs@$IP $TESTS $OS $i $VMs $CI_TYPE
@@ -58,7 +58,7 @@ if [ -z ${1:-} ]; then
  done

  # wait for all vm's to finish
  for ((i=1; i<=VMs; i++)); do
  for i in $(seq 1 $VMs); do
    tail --pid=$(cat vm${i}.pid) -f /dev/null
    pid=$(cat vm${i}log.pid)
    rm -f vm${i}log.pid
@@ -72,31 +72,19 @@ fi
export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
case "$1" in
  freebsd*)
    TDIR="/usr/local/share/zfs"
    sudo kldstat -n zfs 2>/dev/null && sudo kldunload zfs
    sudo -E ./zfs/scripts/zfs.sh
    sudo mv -f /var/tmp/*.txt /tmp
    sudo newfs -U -t -L tmp /dev/vtbd1 >/dev/null
    sudo mount -o noatime /dev/vtbd1 /var/tmp
    sudo chmod 1777 /var/tmp
    sudo mv -f /tmp/*.txt /var/tmp
    TDIR="/usr/local/share/zfs"
    ;;
  *)
    # use xfs @ /var/tmp for all distros
    TDIR="/usr/share/zfs"
    sudo -E modprobe zfs
    sudo mv -f /var/tmp/*.txt /tmp
    sudo mkfs.xfs -fq /dev/vdb
    sudo mount -o noatime /dev/vdb /var/tmp
    sudo chmod 1777 /var/tmp
    sudo mv -f /tmp/*.txt /var/tmp
    ;;
esac

# enable io_uring on el9/el10
case "$1" in
  almalinux9|almalinux10|centos-stream*)
    sudo sysctl kernel.io_uring_disabled=0 > /dev/null
    sudo -E modprobe zfs
    TDIR="/usr/share/zfs"
    ;;
esac
.github/workflows/scripts/qemu-7-prepare.sh (9 lines changed, vendored)
@@ -28,16 +28,15 @@ BASE="$HOME/work/zfs/zfs"
MERGE="$BASE/.github/workflows/scripts/merge_summary.awk"

# catch result files of testings (vm's should be there)
for ((i=1; i<=VMs; i++)); do
  rsync -arL zfs@vm$i:$RESPATH/current $RESPATH/vm$i || true
  scp zfs@vm$i:"/var/tmp/*.txt" $RESPATH/vm$i || true
  scp zfs@vm$i:"/var/tmp/*.rpm" $RESPATH/vm$i || true
for i in $(seq 1 $VMs); do
  rsync -arL zfs@192.168.122.1$i:$RESPATH/current $RESPATH/vm$i || true
  scp zfs@192.168.122.1$i:"/var/tmp/*.txt" $RESPATH/vm$i || true
done
cp -f /var/tmp/*.txt $RESPATH || true
cd $RESPATH

# prepare result files for summary
for ((i=1; i<=VMs; i++)); do
for i in $(seq 1 $VMs); do
  file="vm$i/build-stderr.txt"
  test -s $file && mv -f $file build-stderr.txt
.github/workflows/scripts/qemu-8-summary.sh (2 lines changed, vendored)
@@ -45,7 +45,7 @@ fi

echo -e "\nFull logs for download:\n $1\n"

for ((i=1; i<=VMs; i++)); do
for i in $(seq 1 $VMs); do
  rv=$(cat vm$i/tests-exitcode.txt)

  if [ $rv = 0 ]; then

@@ -1,8 +0,0 @@
#!/usr/bin/env bash

# Helper script to run after installing dependencies. This brings the VM back
# up and copies over the zfs source directory.
echo "Build modules in QEMU machine"
sudo virsh start openzfs
.github/workflows/scripts/qemu-wait-for-vm.sh vm0
rsync -ar $HOME/work/zfs/zfs zfs@vm0:./
.github/workflows/scripts/qemu-test-repo-vm.sh (90 lines changed, vendored)
@@ -1,90 +0,0 @@
#!/bin/bash
#
# Do a test install of ZFS from an external repository.
#
# USAGE:
#
# ./qemu-test-repo-vm [URL]
#
# URL: URL to use instead of http://download.zfsonlinux.org
#      If blank, use the default repo from zfs-release RPM.

set -e

source /etc/os-release
OS="$ID"
VERSION="$VERSION_ID"

ALTHOST=""
if [ -n "$1" ] ; then
  ALTHOST="$1"
fi

# Write summary to /tmp/repo so our artifacts scripts pick it up
mkdir /tmp/repo
SUMMARY=/tmp/repo/$OS-$VERSION-summary.txt

# $1: Repo 'zfs' 'zfs-kmod' 'zfs-testing' 'zfs-testing-kmod'
# $2: (optional) Alternate host than 'http://download.zfsonlinux.org' to
#     install from. Blank means use default from zfs-release RPM.
function test_install {
  repo=$1
  host=""
  if [ -n "$2" ] ; then
    host=$2
  fi

  args="--disablerepo=zfs --enablerepo=$repo"

  # If we supplied an alternate repo URL, and have not already edited
  # zfs.repo, then update the repo file.
  if [ -n "$host" ] && ! grep -q $host /etc/yum.repos.d/zfs.repo ; then
    sudo sed -i "s;baseurl=http://download.zfsonlinux.org;baseurl=$host;g" /etc/yum.repos.d/zfs.repo
  fi

  sudo dnf -y install $args zfs zfs-test

  # Load modules and create a simple pool as a sanity test.
  sudo /usr/share/zfs/zfs.sh -r
  truncate -s 100M /tmp/file
  sudo zpool create tank /tmp/file
  sudo zpool status

  # Print out repo name, rpm installed (kmod or dkms), and repo URL
  baseurl=$(grep -A 5 "\[$repo\]" /etc/yum.repos.d/zfs.repo | awk -F'=' '/baseurl=/{print $2; exit}')
  package=$(sudo rpm -qa | grep zfs | grep -E 'kmod|dkms')

  echo "$repo $package $baseurl" >> $SUMMARY

  sudo zpool destroy tank
  sudo rm /tmp/file
  sudo dnf -y remove zfs
}

echo "##[group]Installing from repo"
# The openzfs docs are the authoritative instructions for the install. Use
# the specific version of zfs-release RPM it recommends.
case $OS in
  almalinux*)
    url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/RHEL-based%20distro/index.rst'
    name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
    sudo dnf -y install https://zfsonlinux.org/epel/$name$(rpm --eval "%{dist}").noarch.rpm 2>&1
    sudo rpm -qi zfs-release
    test_install zfs $ALTHOST
    test_install zfs-kmod $ALTHOST
    test_install zfs-testing $ALTHOST
    test_install zfs-testing-kmod $ALTHOST
    ;;
  fedora*)
    url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/Fedora/index.rst'
    name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
    sudo dnf -y install -y https://zfsonlinux.org/fedora/$name$(rpm --eval "%{dist}").noarch.rpm
    test_install zfs $ALTHOST
    ;;
esac
echo "##[endgroup]"

# Write out a simple version of the summary here. Later on we will collate all
# the summaries and put them into a nice table in the workflow Summary page.
echo "Summary: "
cat $SUMMARY
.github/workflows/scripts/qemu-wait-for-vm.sh (10 lines changed, vendored)
@@ -1,10 +0,0 @@
#!/bin/bash
#
# Wait for a VM to boot up and become active. This is used in a number of our
# scripts.
#
# $1: VM hostname or IP address

while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
  ssh 2>/dev/null zfs@$1 "uname -a" && break
done

@@ -1,32 +0,0 @@
#!/bin/bash
#
# Recursively go though a directory structure and replace duplicate files with
# symlinks. This cuts down our RPM repo size by ~25%.
#
# replace-dupes-with-symlinks.sh [DIR]
#
# DIR: Directory to traverse. Defaults to current directory if not specified.
#

src="$1"
if [ -z "$src" ] ; then
  src="."
fi

declare -A db

pushd "$src"
while read line ; do
  bn="$(basename $line)"
  if [ -z "${db[$bn]}" ] ; then
    # First time this file has been seen
    db[$bn]="$line"
  else
    if diff -b "$line" "${db[$bn]}" &>/dev/null ; then
      # Files are the same, make a symlink
      rm "$line"
      ln -sr "${db[$bn]}" "$line"
    fi
  fi
done <<< "$(find . -type f)"
popd
.github/workflows/zfs-qemu-packages.yml (140 lines changed, vendored)
@@ -1,140 +0,0 @@
# This workflow is used to build and test RPM packages. It is a
# 'workflow_dispatch' workflow, which means it gets run manually.
#
# The workflow has a dropdown menu with two options:
#
# Build RPMs - Build release RPMs and tarballs and put them into an artifact
#              ZIP file. The directory structure used in the ZIP file mirrors
#              the ZFS yum repo.
#
# Test repo - Test install the ZFS RPMs from the ZFS repo. On EL distos, this
#             will do a DKMS and KMOD test install from both the regular and
#             testing repos. On Fedora, it will do a DKMS install from the
#             regular repo. All test install results will be displayed in the
#             Summary page. Note that the workflow provides an optional text
#             text box where you can specify the full URL to an alternate repo.
#             If left blank, it will install from the default repo from the
#             zfs-release RPM (http://download.zfsonlinux.org).
#
# Most users will never need to use this workflow. It will be used primary by
# ZFS admins for building and testing releases.
#
name: zfs-qemu-packages

on:
  workflow_dispatch:
    inputs:
      test_type:
        type: choice
        required: false
        default: "Build RPMs"
        description: "Build RPMs or test the repo?"
        options:
          - "Build RPMs"
          - "Test repo"
      repo_url:
        type: string
        required: false
        default: ""
        description: "(optional) repo URL (blank: use http://download.zfsonlinux.org)"
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  zfs-qemu-packages-jobs:
    name: qemu-VMs
    strategy:
      fail-fast: false
      matrix:
        os: ['almalinux8', 'almalinux9', 'almalinux10', 'fedora41', 'fedora42']
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Setup QEMU
        timeout-minutes: 10
        run: .github/workflows/scripts/qemu-1-setup.sh

      - name: Start build machine
        timeout-minutes: 10
        run: .github/workflows/scripts/qemu-2-start.sh ${{ matrix.os }}

      - name: Install dependencies
        timeout-minutes: 20
        run: |
          .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }}

      - name: Build modules or Test repo
        timeout-minutes: 30
        run: |
          set -e
          if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
            # Bring VM back up and copy over zfs source
            .github/workflows/scripts/qemu-prepare-for-build.sh

            mkdir -p /tmp/repo
            ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-test-repo-vm.sh' ${{ github.event.inputs.repo_url }}
          else
            .github/workflows/scripts/qemu-4-build.sh --repo --release --dkms --tarball ${{ matrix.os }}
          fi

      - name: Prepare artifacts
        if: always()
        timeout-minutes: 10
        run: |
          rsync -a zfs@vm0:/tmp/repo /tmp || true
          .github/workflows/scripts/replace-dupes-with-symlinks.sh /tmp/repo
          tar -cf ${{ matrix.os }}-repo.tar -C /tmp repo

      - uses: actions/upload-artifact@v4
        id: artifact-upload
        if: always()
        with:
          name: ${{ matrix.os }}-repo
          path: ${{ matrix.os }}-repo.tar
          compression-level: 0
          retention-days: 2
          if-no-files-found: ignore

  combine_repos:
    if: always()
    needs: [zfs-qemu-packages-jobs]
    name: "Results"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/download-artifact@v4
        id: artifact-download
        if: always()
      - name: Test Summary
        if: always()
        run: |
          for i in $(find . -type f -iname "*.tar") ; do
            tar -xf $i -C /tmp
          done
          tar -cf all-repo.tar -C /tmp repo

          # If we're installing from a repo, print out the summary of the versions
          # that got installed using Markdown.
          if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
            cd /tmp/repo
            for i in $(ls *.txt) ; do
              nicename="$(echo $i | sed 's/.txt//g; s/-/ /g')"
              echo "### $nicename" >> $GITHUB_STEP_SUMMARY
              echo "|repo|RPM|URL|" >> $GITHUB_STEP_SUMMARY
              echo "|:---|:---|:---|" >> $GITHUB_STEP_SUMMARY
              awk '{print "|"$1"|"$2"|"$3"|"}' $i >> $GITHUB_STEP_SUMMARY
            done
          fi

      - uses: actions/upload-artifact@v4
        id: artifact-upload2
        if: always()
        with:
          name: all-repo
          path: all-repo.tar
          compression-level: 0
          retention-days: 5
          if-no-files-found: ignore
.github/workflows/zfs-qemu.yml (60 lines changed, vendored)
@@ -15,11 +15,6 @@ on:
        required: false
        default: false
        description: 'Test on CentOS 10 stream'
      fedora_kernel_ver:
        type: string
        required: false
        default: ""
        description: "(optional) Experimental kernel version to install on Fedora (like '6.14' or '6.13.3-0.rc3')"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -39,8 +34,8 @@ jobs:
      - name: Generate OS config and CI type
        id: os
        run: |
          FULL_OS='["almalinux8", "almalinux9", "almalinux10", "debian11", "debian12", "fedora41", "fedora42", "freebsd13-4r", "freebsd14-3s", "freebsd15-0c", "ubuntu22", "ubuntu24"]'
          QUICK_OS='["almalinux8", "almalinux9", "almalinux10", "debian12", "fedora42", "freebsd14-3s", "ubuntu24"]'
          FULL_OS='["almalinux8", "almalinux9", "debian11", "debian12", "fedora40", "fedora41", "freebsd13-3r", "freebsd13-4s", "freebsd14-1r", "freebsd14-2s", "freebsd15-0c", "ubuntu20", "ubuntu22", "ubuntu24"]'
          QUICK_OS='["almalinux8", "almalinux9", "debian12", "fedora41", "freebsd13-3r", "freebsd14-2r", "ubuntu24"]'
          # determine CI type when running on PR
          ci_type="full"
          if ${{ github.event_name == 'pull_request' }}; then
@@ -53,16 +48,8 @@ jobs:
          else
            os_selection="$FULL_OS"
          fi

          if [ ${{ github.event.inputs.fedora_kernel_ver }} != "" ] ; then
            # They specified a custom kernel version for Fedora. Use only
            # Fedora runners.
            os_json=$(echo ${os_selection} | jq -c '[.[] | select(startswith("fedora"))]')
          else
            # Normal case
            os_json=$(echo ${os_selection} | jq -c)
          fi

          os_json=$(echo ${os_selection} | jq -c)

          # Add optional runners
          if [ "${{ github.event.inputs.include_stream9 }}" == 'true' ]; then
            os_json=$(echo $os_json | jq -c '. += ["centos-stream9"]')
@@ -70,23 +57,26 @@ jobs:
          if [ "${{ github.event.inputs.include_stream10 }}" == 'true' ]; then
            os_json=$(echo $os_json | jq -c '. += ["centos-stream10"]')
          fi

          echo $os_json
          echo "os=$os_json" >> $GITHUB_OUTPUT
          echo "ci_type=$ci_type" >> $GITHUB_OUTPUT

  qemu-vm:
    name: qemu-x86
    needs: [ test-config ]
    strategy:
      fail-fast: false
      matrix:
        # rhl: almalinux8, almalinux9, centos-stream9, fedora41
        # debian: debian11, debian12, ubuntu22, ubuntu24
        # rhl: almalinux8, almalinux9, centos-stream9, fedora40, fedora41
        # debian: debian11, debian12, ubuntu20, ubuntu22, ubuntu24
        # misc: archlinux, tumbleweed
        # FreeBSD variants of 2025-06:
        # FreeBSD Release: freebsd13-4r, freebsd13-5r, freebsd14-1r, freebsd14-2r, freebsd14-3r
        # FreeBSD Stable: freebsd13-5s, freebsd14-3s
        # FreeBSD variants of 2024-12:
        # FreeBSD Release: freebsd13-3r, freebsd13-4r, freebsd14-1r, freebsd14-2r
        # FreeBSD Stable: freebsd13-4s, freebsd14-2s
        # FreeBSD Current: freebsd15-0c
        os: ${{ fromJson(needs.test-config.outputs.test_os) }}
    runs-on: ubuntu-24.04
@@ -105,11 +95,31 @@ jobs:

      - name: Install dependencies
        timeout-minutes: 20
        run: .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }} ${{ github.event.inputs.fedora_kernel_ver }}
        run: |
          echo "Install dependencies in QEMU machine"
          IP=192.168.122.10
          while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
            ssh 2>/dev/null zfs@$IP "uname -a" && break
          done
          scp .github/workflows/scripts/qemu-3-deps.sh zfs@$IP:qemu-3-deps.sh
          PID=`pidof /usr/bin/qemu-system-x86_64`
          ssh zfs@$IP '$HOME/qemu-3-deps.sh' ${{ matrix.os }}
          # wait for poweroff to succeed
          tail --pid=$PID -f /dev/null
          sleep 5 # avoid this: "error: Domain is already active"
          rm -f $HOME/.ssh/known_hosts

      - name: Build modules
        timeout-minutes: 30
        run: .github/workflows/scripts/qemu-4-build.sh --poweroff --enable-debug ${{ matrix.os }}
        run: |
          echo "Build modules in QEMU machine"
          sudo virsh start openzfs
          IP=192.168.122.10
          while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
            ssh 2>/dev/null zfs@$IP "uname -a" && break
          done
          rsync -ar $HOME/work/zfs/zfs zfs@$IP:./
          ssh zfs@$IP '$HOME/zfs/.github/workflows/scripts/qemu-4-build.sh' ${{ matrix.os }}

      - name: Setup testing machines
        timeout-minutes: 5
.github/workflows/zloop.yml (2 lines changed, vendored)
@@ -20,7 +20,7 @@ jobs:
      - name: Install dependencies
        run: |
          sudo apt-get purge -y snapd google-chrome-stable firefox
          ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu24
          ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps.sh ubuntu24
      - name: Autogen.sh
        run: |
          sed -i '/DEBUG_CFLAGS="-Werror"/s/^/#/' config/zfs-build.m4
.mailmap (11 lines changed)
@@ -71,7 +71,6 @@ Rob Norris <rob.norris@klarasystems.com>
Sam Lunt <samuel.j.lunt@gmail.com>
Sanjeev Bagewadi <sanjeev.bagewadi@gmail.com>
Sebastian Wuerl <s.wuerl@mailbox.org>
SHENGYI HONG <aokblast@FreeBSD.org>
Stoiko Ivanov <github@nomore.at>
Tamas TEVESZ <ice@extreme.hu>
WHR <msl0000023508@gmail.com>
@@ -79,14 +78,9 @@ Yanping Gao <yanping.gao@xtaotech.com>
Youzhong Yang <youzhong@gmail.com>

# Signed-off-by: overriding Author:
Alexander Ziaee <ziaee@FreeBSD.org> <concussious@runbox.com>
Felix Schmidt <felixschmidt20@aol.com> <f.sch.prototype@gmail.com>
Olivier Certner <olce@FreeBSD.org> <olce.freebsd@certner.fr>
Phil Sutter <phil@nwl.cc> <p.github@nwl.cc>
poscat <poscat@poscat.moe> <poscat0x04@outlook.com>
Qiuhao Chen <chenqiuhao1997@gmail.com> <haohao0924@126.com>
Ryan <errornointernet@envs.net> <error.nointernet@gmail.com>
Sietse <sietse@wizdom.nu> <uglymotha@wizdom.nu>
Qiuhao Chen <chenqiuhao1997@gmail.com> <haohao0924@126.com>
Yuxin Wang <yuxinwang9999@gmail.com> <Bi11gates9999@gmail.com>
Zhenlei Huang <zlei@FreeBSD.org> <zlei.huang@gmail.com>

@@ -103,7 +97,6 @@ Tulsi Jain <tulsi.jain@delphix.com> <tulsi.jain@Tulsi-Jains-MacBook-Pro.local>
# Mappings from Github no-reply addresses
ajs124 <git@ajs124.de> <ajs124@users.noreply.github.com>
Alek Pinchuk <apinchuk@axcient.com> <alek-p@users.noreply.github.com>
Aleksandr Liber <aleksandr.liber@perforce.com> <61714074+AleksandrLiber@users.noreply.github.com>
Alexander Lobakin <alobakin@pm.me> <solbjorn@users.noreply.github.com>
Alexey Smirnoff <fling@member.fsf.org> <fling-@users.noreply.github.com>
Allen Holl <allen.m.holl@gmail.com> <65494904+allen-4@users.noreply.github.com>
@@ -140,7 +133,6 @@ Fedor Uporov <fuporov.vstack@gmail.com> <60701163+fuporovvStack@users.noreply.gi
Felix Dörre <felix@dogcraft.de> <felixdoerre@users.noreply.github.com>
Felix Neumärker <xdch47@posteo.de> <34678034+xdch47@users.noreply.github.com>
Finix Yan <yancw@info2soft.com> <Finix1979@users.noreply.github.com>
Friedrich Weber <f.weber@proxmox.com> <56110206+frwbr@users.noreply.github.com>
Gaurav Kumar <gauravk.18@gmail.com> <gaurkuma@users.noreply.github.com>
George Gaydarov <git@gg7.io> <gg7@users.noreply.github.com>
Georgy Yakovlev <gyakovlev@gentoo.org> <168902+gyakovlev@users.noreply.github.com>
@@ -215,7 +207,6 @@ Torsten Wörtwein <twoertwein@gmail.com> <twoertwein@users.noreply.github.com>
Tulsi Jain <tulsi.jain@delphix.com> <TulsiJain@users.noreply.github.com>
Václav Skála <skala@vshosting.cz> <33496485+vaclavskala@users.noreply.github.com>
Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com> <88050553+vaibhav-delphix@users.noreply.github.com>
Vandana Rungta <vrungta@amazon.com> <46906819+vandanarungta@users.noreply.github.com>
Violet Purcell <vimproved@inventati.org> <66446404+vimproved@users.noreply.github.com>
Vipin Kumar Verma <vipin.verma@hpe.com> <75025470+vermavipinkumar@users.noreply.github.com>
Wolfgang Bumiller <w.bumiller@proxmox.com> <Blub@users.noreply.github.com>
AUTHORS (24 lines changed)
@@ -29,7 +29,6 @@ CONTRIBUTORS:
    Alejandro Colomar <Colomar.6.4.3@GMail.com>
    Alejandro R. Sedeño <asedeno@mit.edu>
    Alek Pinchuk <alek@nexenta.com>
    Aleksandr Liber <aleksandr.liber@perforce.com>
    Aleksa Sarai <cyphar@cyphar.com>
    Alexander Eremin <a.eremin@nexenta.com>
    Alexander Lobakin <alobakin@pm.me>
@@ -37,7 +36,6 @@ CONTRIBUTORS:
    Alexander Pyhalov <apyhalov@gmail.com>
    Alexander Richardson <Alexander.Richardson@cl.cam.ac.uk>
    Alexander Stetsenko <ams@nexenta.com>
    Alexander Ziaee <ziaee@FreeBSD.org>
    Alex Braunegg <alex.braunegg@gmail.com>
    Alexey Shvetsov <alexxy@gentoo.org>
    Alexey Smirnoff <fling@member.fsf.org>
@@ -82,7 +80,6 @@ CONTRIBUTORS:
    Arne Jansen <arne@die-jansens.de>
    Aron Xu <happyaron.xu@gmail.com>
    Arshad Hussain <arshad.hussain@aeoncomputing.com>
    Artem <artem.vlasenko@ossrevival.org>
    Arun KV <arun.kv@datacore.com>
    Arvind Sankar <nivedita@alum.mit.edu>
    Attila Fülöp <attila@fueloep.org>
@@ -229,12 +226,10 @@ CONTRIBUTORS:
    Fedor Uporov <fuporov.vstack@gmail.com>
    Felix Dörre <felix@dogcraft.de>
    Felix Neumärker <xdch47@posteo.de>
    Felix Schmidt <felixschmidt20@aol.com>
    Feng Sun <loyou85@gmail.com>
    Finix Yan <yancw@info2soft.com>
    Francesco Mazzoli <f@mazzo.li>
    Frederik Wessels <wessels147@gmail.com>
    Friedrich Weber <f.weber@proxmox.com>
    Frédéric Vanniere <f.vanniere@planet-work.com>
    Gabriel A. Devenyi <gdevenyi@gmail.com>
    Garrett D'Amore <garrett@nexenta.com>
@@ -293,7 +288,6 @@ CONTRIBUTORS:
    ilovezfs <ilovezfs@icloud.com>
    InsanePrawn <Insane.Prawny@gmail.com>
    Isaac Huang <he.huang@intel.com>
    Ivan Volosyuk <Ivan.Volosyuk@gmail.com>
    Jacek Fefliński <feflik@gmail.com>
    Jacob Adams <tookmund@gmail.com>
    Jake Howard <git@theorangeone.net>
@@ -301,7 +295,6 @@ CONTRIBUTORS:
    James H <james@kagisoft.co.uk>
    James Lee <jlee@thestaticvoid.com>
    James Pan <jiaming.pan@yahoo.com>
    James Reilly <jreilly1821@gmail.com>
    James Wah <james@laird-wah.net>
    Jan Engelhardt <jengelh@inai.de>
    Jan Kryl <jan.kryl@nexenta.com>
@@ -313,7 +306,6 @@ CONTRIBUTORS:
    Jason Lee <jasonlee@lanl.gov>
    Jason Zaman <jasonzaman@gmail.com>
    Javen Wu <wu.javen@gmail.com>
    Jaydeep Kshirsagar <jkshirsagar@maxlinear.com>
    Jean-Baptiste Lallement <jean-baptiste@ubuntu.com>
    Jeff Dike <jdike@akamai.com>
    Jeremy Faulkner <gldisater@gmail.com>
@@ -321,7 +313,6 @@ CONTRIBUTORS:
    Jeremy Jones <jeremy@delphix.com>
    Jeremy Visser <jeremy.visser@gmail.com>
    Jerry Jelinek <jerry.jelinek@joyent.com>
    Jerzy Kołosowski <jerzy@kolosowscy.pl>
    Jessica Clarke <jrtc27@jrtc27.com>
    Jinshan Xiong <jinshan.xiong@intel.com>
    Jitendra Patidar <jitendra.patidar@nutanix.com>
@@ -381,7 +372,6 @@ CONTRIBUTORS:
    Kohsuke Kawaguchi <kk@kohsuke.org>
    Konstantin Khorenko <khorenko@virtuozzo.com>
    KORN Andras <korn@elan.rulez.org>
    kotauskas <v.toncharov@gmail.com>
    Kristof Provost <github@sigsegv.be>
    Krzysztof Piecuch <piecuch@kpiecuch.pl>
    Kyle Blatter <kyleblatter@llnl.gov>
@@ -462,7 +452,6 @@ CONTRIBUTORS:
    Mike Swanson <mikeonthecomputer@gmail.com>
    Milan Jurik <milan.jurik@xylab.cz>
    Minsoo Choo <minsoochoo0122@proton.me>
    mnrx <mnrx@users.noreply.github.com>
    Mohamed Tawfik <m_tawfik@aucegypt.edu>
    Morgan Jones <mjones@rice.edu>
    Moritz Maxeiner <moritz@ucworks.org>
@@ -488,7 +477,7 @@ CONTRIBUTORS:
    Olaf Faaland <faaland1@llnl.gov>
    Oleg Drokin <green@linuxhacker.ru>
    Oleg Stepura <oleg@stepura.com>
    Olivier Certner <olce@FreeBSD.org>
    Olivier Certner <olce.freebsd@certner.fr>
    Olivier Mazouffre <olivier.mazouffre@ims-bordeaux.fr>
    omni <omni+vagant@hack.org>
    Orivej Desh <orivej@gmx.fr>
@@ -505,7 +494,6 @@ CONTRIBUTORS:
    Pawel Jakub Dawidek <pjd@FreeBSD.org>
    Pedro Giffuni <pfg@freebsd.org>
    Peng <peng.hse@xtaotech.com>
    Peng Liu <littlenewton6@gmail.com>
    Peter Ashford <ashford@accs.com>
    Peter Dave Hello <hsu@peterdavehello.org>
    Peter Doherty <peterd@acranox.org>
@@ -515,18 +503,15 @@ CONTRIBUTORS:
    Philip Pokorny <ppokorny@penguincomputing.com>
    Philipp Riederer <pt@philipptoelke.de>
    Phil Kauffman <philip@kauffman.me>
    Phil Sutter <phil@nwl.cc>
    Ping Huang <huangping@smartx.com>
    Piotr Kubaj <pkubaj@anongoth.pl>
    Piotr P. Stefaniak <pstef@freebsd.org>
    poscat <poscat@poscat.moe>
    Prakash Surya <prakash.surya@delphix.com>
    Prasad Joshi <prasadjoshi124@gmail.com>
    privb0x23 <privb0x23@users.noreply.github.com>
    P.SCH <p88@yahoo.com>
    Qiuhao Chen <chenqiuhao1997@gmail.com>
    Quartz <yyhran@163.com>
    Quentin Thébault <quentin.thebault@defenso.fr>
    Quentin Zdanis <zdanisq@gmail.com>
    Rafael Kitover <rkitover@gmail.com>
    RageLtMan <sempervictus@users.noreply.github.com>
@@ -578,7 +563,6 @@ CONTRIBUTORS:
    Scot W. Stevenson <scot.stevenson@gmail.com>
    Sean Eric Fagan <sef@ixsystems.com>
    Sebastian Gottschall <s.gottschall@dd-wrt.com>
    Sebastian Pauka <me@spauka.se>
    Sebastian Wuerl <s.wuerl@mailbox.org>
    Sebastien Roy <seb@delphix.com>
    Sen Haerens <sen@senhaerens.be>
@@ -591,11 +575,9 @@ CONTRIBUTORS:
    Shaun Tancheff <shaun@aeonazure.com>
    Shawn Bayern <sbayern@law.fsu.edu>
    Shengqi Chen <harry-chen@outlook.com>
    SHENGYI HONG <aokblast@FreeBSD.org>
    Shen Yan <shenyanxxxy@qq.com>
    Sietse <sietse@wizdom.nu>
    Simon Guest <simon.guest@tesujimath.org>
    Simon Howard <fraggle@soulsphere.org>
    Simon Klinkert <simon.klinkert@gmail.com>
    Sowrabha Gopal <sowrabha.gopal@delphix.com>
    Spencer Kinny <spencerkinny1995@gmail.com>
@@ -617,7 +599,6 @@ CONTRIBUTORS:
    Stéphane Lesimple <speed47_github@speed47.net>
    Suman Chakravartula <schakrava@gmail.com>
    Sydney Vanda <sydney.m.vanda@intel.com>
    Syed Shahrukh Hussain <syed.shahrukh@ossrevival.org>
    Sören Tempel <soeren+git@soeren-tempel.net>
    Tamas TEVESZ <ice@extreme.hu>
    Teodor Spæren <teodor_spaeren@riseup.net>
@@ -635,9 +616,7 @@ CONTRIBUTORS:
    timor <timor.dd@googlemail.com>
    Timothy Day <tday141@gmail.com>
    Tim Schumacher <timschumi@gmx.de>
    Tim Smith <tim@mondoo.com>
    Tino Reichardt <milky-zfs@mcmilk.de>
    tleydxdy <shironeko.github@tesaguri.club>
    Tobin Harding <me@tobin.cc>
    Todd Seidelmann <seidelma@users.noreply.github.com>
    Tom Caputi <tcaputi@datto.com>
@@ -661,7 +640,6 @@ CONTRIBUTORS:
    Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com>
    Valmiky Arquissandas <kayvlim@gmail.com>
    Val Packett <val@packett.cool>
    Vandana Rungta <vrungta@amazon.com>
    Vince van Oosten <techhazard@codeforyouand.me>
    Violet Purcell <vimproved@inventati.org>
    Vipin Kumar Verma <vipin.verma@hpe.com>
META (4 lines changed)
@@ -1,10 +1,10 @@
Meta: 1
Name: zfs
Branch: 1.0
Version: 2.3.99
Version: 2.3.0
Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS
Linux-Maximum: 6.15
Linux-Maximum: 6.12
Linux-Minimum: 4.18

@@ -112,10 +112,6 @@ commitcheck:
	${top_srcdir}/scripts/commitcheck.sh; \
	fi

CHECKS += spdxcheck
spdxcheck:
	$(AM_V_at)$(top_srcdir)/scripts/spdxcheck.pl

if HAVE_PARALLEL
cstyle_line = -print0 | parallel -X0 ${top_srcdir}/scripts/cstyle.pl -cpP {}
else

@@ -28,7 +28,7 @@ Two release branches are maintained for OpenZFS, they are:
Minor changes to support these distribution kernels will be applied as
needed. New kernel versions released after the OpenZFS LTS release are
not supported. LTS releases will receive patches for at least 2 years.
The current LTS release is OpenZFS 2.2.
The current LTS release is OpenZFS 2.1.

* OpenZFS current - Tracks the newest MAJOR.MINOR release. This branch
includes support for the latest OpenZFS features and recently releases

@@ -1,5 +1,4 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
# Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,

@@ -1,5 +1,4 @@
#!/usr/bin/env @PYTHON_SHEBANG@
# SPDX-License-Identifier: CDDL-1.0
#
# Print out ZFS ARC Statistics exported via kstat(1)
# For a definition of fields, or usage, use arcstat -v
@@ -735,14 +734,13 @@ def calculate():
            v[group["percent"]] if v[group["percent"]] > 0 else 0

    if l2exist:
        l2asize = cur["l2_asize"]
        v["l2hits"] = d["l2_hits"] / sint
        v["l2miss"] = d["l2_misses"] / sint
        v["l2read"] = v["l2hits"] + v["l2miss"]
        v["l2hit%"] = 100 * v["l2hits"] / v["l2read"] if v["l2read"] > 0 else 0

        v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0
        v["l2asize"] = l2asize
        v["l2asize"] = cur["l2_asize"]
        v["l2size"] = cur["l2_size"]
        v["l2bytes"] = d["l2_read_bytes"] / sint
        v["l2wbytes"] = d["l2_write_bytes"] / sint
@@ -752,11 +750,11 @@ def calculate():
            v["l2mru"] = cur["l2_mru_asize"]
            v["l2data"] = cur["l2_bufc_data_asize"]
            v["l2meta"] = cur["l2_bufc_metadata_asize"]
            v["l2pref%"] = 100 * v["l2pref"] / l2asize if l2asize > 0 else 0
            v["l2mfu%"] = 100 * v["l2mfu"] / l2asize if l2asize > 0 else 0
            v["l2mru%"] = 100 * v["l2mru"] / l2asize if l2asize > 0 else 0
            v["l2data%"] = 100 * v["l2data"] / l2asize if l2asize > 0 else 0
            v["l2meta%"] = 100 * v["l2meta"] / l2asize if l2asize > 0 else 0
            v["l2pref%"] = 100 * v["l2pref"] / v["l2asize"]
            v["l2mfu%"] = 100 * v["l2mfu"] / v["l2asize"]
            v["l2mru%"] = 100 * v["l2mru"] / v["l2asize"]
            v["l2data%"] = 100 * v["l2data"] / v["l2asize"]
            v["l2meta%"] = 100 * v["l2meta"] / v["l2asize"]

    v["grow"] = 0 if cur["arc_no_grow"] else 1
    v["need"] = cur["arc_need_free"]

@@ -1,5 +1,4 @@
#!/usr/bin/env @PYTHON_SHEBANG@
# SPDX-License-Identifier: CDDL-1.0
#
# Print out statistics for all cached dmu buffers. This information
# is available through the dbufs kstat and may be post-processed as

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
cmd/zdb/zdb.c (130 lines changed)
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -123,7 +122,7 @@ static int flagbits[256];
|
||||
|
||||
static uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
|
||||
static int leaked_objects = 0;
|
||||
static zfs_range_tree_t *mos_refd_objs;
|
||||
static range_tree_t *mos_refd_objs;
|
||||
static spa_t *spa;
|
||||
static objset_t *os;
|
||||
static boolean_t kernel_init_done;
|
||||
@ -326,7 +325,7 @@ typedef struct metaslab_verify {
|
||||
/*
|
||||
* What's currently allocated for this metaslab.
|
||||
*/
|
||||
zfs_range_tree_t *mv_allocated;
|
||||
range_tree_t *mv_allocated;
|
||||
} metaslab_verify_t;
|
||||
|
||||
typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
|
||||
@ -418,7 +417,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
|
||||
uint64_t txg = sme->sme_txg;
|
||||
|
||||
if (sme->sme_type == SM_ALLOC) {
|
||||
if (zfs_range_tree_contains(mv->mv_allocated,
|
||||
if (range_tree_contains(mv->mv_allocated,
|
||||
offset, size)) {
|
||||
(void) printf("ERROR: DOUBLE ALLOC: "
|
||||
"%llu [%llx:%llx] "
|
||||
@ -427,11 +426,11 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
|
||||
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
|
||||
(u_longlong_t)mv->mv_msid);
|
||||
} else {
|
||||
zfs_range_tree_add(mv->mv_allocated,
|
||||
range_tree_add(mv->mv_allocated,
|
||||
offset, size);
|
||||
}
|
||||
} else {
|
||||
if (!zfs_range_tree_contains(mv->mv_allocated,
|
||||
if (!range_tree_contains(mv->mv_allocated,
|
||||
offset, size)) {
|
||||
(void) printf("ERROR: DOUBLE FREE: "
|
||||
"%llu [%llx:%llx] "
|
||||
@ -440,7 +439,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
|
||||
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
|
||||
(u_longlong_t)mv->mv_msid);
|
||||
} else {
|
||||
zfs_range_tree_remove(mv->mv_allocated,
|
||||
range_tree_remove(mv->mv_allocated,
|
||||
offset, size);
|
||||
}
|
||||
}
|
||||
@ -615,11 +614,11 @@ livelist_metaslab_validate(spa_t *spa)
|
||||
(longlong_t)vd->vdev_ms_count);
|
||||
|
||||
uint64_t shift, start;
|
||||
zfs_range_seg_type_t type =
|
||||
range_seg_type_t type =
|
||||
metaslab_calculate_range_tree_type(vd, m,
|
||||
&start, &shift);
|
||||
metaslab_verify_t mv;
|
||||
mv.mv_allocated = zfs_range_tree_create(NULL,
|
||||
mv.mv_allocated = range_tree_create(NULL,
|
||||
type, NULL, start, shift);
|
||||
mv.mv_vdid = vd->vdev_id;
|
||||
mv.mv_msid = m->ms_id;
|
||||
@ -634,8 +633,8 @@ livelist_metaslab_validate(spa_t *spa)
|
||||
spacemap_check_ms_sm(m->ms_sm, &mv);
|
||||
spacemap_check_sm_log(spa, &mv);
|
||||
|
||||
zfs_range_tree_vacate(mv.mv_allocated, NULL, NULL);
|
||||
zfs_range_tree_destroy(mv.mv_allocated);
|
||||
range_tree_vacate(mv.mv_allocated, NULL, NULL);
|
||||
range_tree_destroy(mv.mv_allocated);
|
||||
zfs_btree_clear(&mv.mv_livelist_allocs);
|
||||
zfs_btree_destroy(&mv.mv_livelist_allocs);
|
||||
}
|
||||
@ -1634,9 +1633,9 @@ static void
|
||||
dump_metaslab_stats(metaslab_t *msp)
|
||||
{
|
||||
char maxbuf[32];
|
||||
zfs_range_tree_t *rt = msp->ms_allocatable;
|
||||
range_tree_t *rt = msp->ms_allocatable;
|
||||
zfs_btree_t *t = &msp->ms_allocatable_by_size;
|
||||
int free_pct = zfs_range_tree_space(rt) * 100 / msp->ms_size;
|
||||
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
|
||||
|
||||
/* max sure nicenum has enough space */
|
||||
_Static_assert(sizeof (maxbuf) >= NN_NUMBUF_SZ, "maxbuf truncated");
|
||||
@ -1647,7 +1646,7 @@ dump_metaslab_stats(metaslab_t *msp)
|
||||
"segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
|
||||
"freepct", free_pct);
|
||||
(void) printf("\tIn-memory histogram:\n");
|
||||
dump_histogram(rt->rt_histogram, ZFS_RANGE_TREE_HISTOGRAM_SIZE, 0);
|
||||
dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1669,7 +1668,7 @@ dump_metaslab(metaslab_t *msp)
|
||||
if (dump_opt['m'] > 2 && !dump_opt['L']) {
|
||||
mutex_enter(&msp->ms_lock);
|
||||
VERIFY0(metaslab_load(msp));
|
||||
zfs_range_tree_stat_verify(msp->ms_allocatable);
|
||||
range_tree_stat_verify(msp->ms_allocatable);
|
||||
dump_metaslab_stats(msp);
|
||||
metaslab_unload(msp);
|
||||
mutex_exit(&msp->ms_lock);
|
||||
@ -1770,8 +1769,7 @@ dump_metaslab_groups(spa_t *spa, boolean_t show_special)
|
||||
(void) printf("%3llu%%\n",
|
||||
(u_longlong_t)mg->mg_fragmentation);
|
||||
}
|
||||
dump_histogram(mg->mg_histogram,
|
||||
ZFS_RANGE_TREE_HISTOGRAM_SIZE, 0);
|
||||
dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
|
||||
}
|
||||
|
||||
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
|
||||
@ -1780,7 +1778,7 @@ dump_metaslab_groups(spa_t *spa, boolean_t show_special)
|
||||
(void) printf("\t%3s\n", "-");
|
||||
else
|
||||
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
|
||||
dump_histogram(mc->mc_histogram, ZFS_RANGE_TREE_HISTOGRAM_SIZE, 0);
|
||||
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -2060,8 +2058,6 @@ dump_ddt_object(ddt_t *ddt, ddt_type_t type, ddt_class_t class)
|
||||
if (dump_opt['D'] < 3)
|
||||
return;
|
||||
|
||||
(void) printf("%s: object=%llu\n", name,
|
||||
(u_longlong_t)ddt->ddt_object[type][class]);
|
||||
zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
|
||||
|
||||
if (dump_opt['D'] < 4)
|
||||
@ -2296,12 +2292,12 @@ dump_dtl(vdev_t *vd, int indent)
|
||||
required ? "DTL-required" : "DTL-expendable");
|
||||
|
||||
for (int t = 0; t < DTL_TYPES; t++) {
|
||||
zfs_range_tree_t *rt = vd->vdev_dtl[t];
|
||||
if (zfs_range_tree_space(rt) == 0)
|
||||
range_tree_t *rt = vd->vdev_dtl[t];
|
||||
if (range_tree_space(rt) == 0)
|
||||
continue;
|
||||
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
|
||||
indent + 2, "", name[t]);
|
||||
zfs_range_tree_walk(rt, dump_dtl_seg, prefix);
|
||||
range_tree_walk(rt, dump_dtl_seg, prefix);
|
||||
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
|
||||
dump_spacemap(spa->spa_meta_objset,
|
||||
vd->vdev_dtl_sm);
|
||||
@ -2545,14 +2541,12 @@ snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
|
||||
|
||||
blkbuf[0] = '\0';
|
||||
|
||||
for (i = 0; i < ndvas; i++) {
|
||||
for (i = 0; i < ndvas; i++)
|
||||
(void) snprintf(blkbuf + strlen(blkbuf),
|
||||
buflen - strlen(blkbuf), "%llu:%llx:%llx%s ",
|
||||
buflen - strlen(blkbuf), "%llu:%llx:%llx ",
|
||||
(u_longlong_t)DVA_GET_VDEV(&dva[i]),
|
||||
(u_longlong_t)DVA_GET_OFFSET(&dva[i]),
|
||||
(u_longlong_t)DVA_GET_ASIZE(&dva[i]),
|
||||
(DVA_GET_GANG(&dva[i]) ? "G" : ""));
|
||||
}
|
||||
(u_longlong_t)DVA_GET_ASIZE(&dva[i]));
|
||||
|
||||
if (BP_IS_HOLE(bp)) {
|
||||
(void) snprintf(blkbuf + strlen(blkbuf),
|
||||
@ -5867,7 +5861,7 @@ zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
|
||||
* Find the block. This will create the entry in memory, but
|
||||
* we'll know if that happened by its refcount.
|
||||
*/
|
||||
ddt_entry_t *dde = ddt_lookup(ddt, bp, B_TRUE);
|
||||
ddt_entry_t *dde = ddt_lookup(ddt, bp);
|
||||
|
||||
/*
|
||||
* ddt_lookup() can return NULL if this block didn't exist
|
||||
@ -6264,9 +6258,9 @@ load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
|
||||
return (0);
|
||||
|
||||
if (sme->sme_type == SM_ALLOC)
|
||||
zfs_range_tree_add(svr->svr_allocd_segs, offset, size);
|
||||
range_tree_add(svr->svr_allocd_segs, offset, size);
|
||||
else
|
||||
zfs_range_tree_remove(svr->svr_allocd_segs, offset, size);
|
||||
range_tree_remove(svr->svr_allocd_segs, offset, size);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -6320,20 +6314,18 @@ zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
|
||||
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
|
||||
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
|
||||
|
||||
ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
|
||||
ASSERT0(range_tree_space(svr->svr_allocd_segs));
|
||||
|
||||
zfs_range_tree_t *allocs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
|
||||
NULL, 0, 0);
|
||||
range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
|
||||
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
|
||||
metaslab_t *msp = vd->vdev_ms[msi];
|
||||
|
||||
ASSERT0(zfs_range_tree_space(allocs));
|
||||
ASSERT0(range_tree_space(allocs));
|
||||
if (msp->ms_sm != NULL)
|
||||
VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
|
||||
zfs_range_tree_vacate(allocs, zfs_range_tree_add,
|
||||
svr->svr_allocd_segs);
|
||||
range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
|
||||
}
|
||||
zfs_range_tree_destroy(allocs);
|
||||
range_tree_destroy(allocs);
|
||||
|
||||
iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
|
||||
|
||||
@ -6342,12 +6334,12 @@ zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
|
||||
* because we have not allocated mappings for
|
||||
* it yet.
|
||||
*/
|
||||
zfs_range_tree_clear(svr->svr_allocd_segs,
|
||||
range_tree_clear(svr->svr_allocd_segs,
|
||||
vdev_indirect_mapping_max_offset(vim),
|
||||
vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
|
||||
|
||||
zcb->zcb_removing_size += zfs_range_tree_space(svr->svr_allocd_segs);
|
||||
zfs_range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
|
||||
zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
|
||||
range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
|
||||
|
||||
spa_config_exit(spa, SCL_CONFIG, FTAG);
|
||||
}
|
||||
@ -6450,8 +6442,7 @@ checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
|
||||
* also verify that the entry is there to begin with.
|
||||
*/
|
||||
mutex_enter(&ms->ms_lock);
|
||||
zfs_range_tree_remove(ms->ms_allocatable, sme->sme_offset,
|
||||
sme->sme_run);
|
||||
range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
|
||||
mutex_exit(&ms->ms_lock);
|
||||
|
||||
cseea->cseea_checkpoint_size += sme->sme_run;
|
||||
@ -6582,9 +6573,9 @@ load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
|
||||
return (0);
|
||||
|
||||
if (*uic_maptype == sme->sme_type)
|
||||
zfs_range_tree_add(ms->ms_allocatable, offset, size);
|
||||
range_tree_add(ms->ms_allocatable, offset, size);
|
||||
else
|
||||
zfs_range_tree_remove(ms->ms_allocatable, offset, size);
|
||||
range_tree_remove(ms->ms_allocatable, offset, size);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -6618,7 +6609,7 @@ load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
|
||||
(longlong_t)vd->vdev_ms_count);
|
||||
|
||||
mutex_enter(&msp->ms_lock);
|
||||
zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);
|
||||
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
|
||||
|
||||
/*
|
||||
* We don't want to spend the CPU manipulating the
|
||||
@ -6651,7 +6642,7 @@ load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
|
||||
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
|
||||
|
||||
mutex_enter(&msp->ms_lock);
|
||||
zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);
|
||||
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
|
||||
|
||||
/*
|
||||
* We don't want to spend the CPU manipulating the
|
||||
@ -6675,7 +6666,7 @@ load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
|
||||
*/
|
||||
ASSERT3U(ent_offset + ent_len, <=,
|
||||
msp->ms_start + msp->ms_size);
|
||||
zfs_range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
|
||||
range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
|
||||
}
|
||||
|
||||
if (!msp->ms_loaded)
|
||||
@ -6821,7 +6812,7 @@ zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
|
||||
for (uint64_t inner_offset = 0;
|
||||
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
|
||||
inner_offset += 1ULL << vd->vdev_ashift) {
|
||||
if (zfs_range_tree_contains(msp->ms_allocatable,
|
||||
if (range_tree_contains(msp->ms_allocatable,
|
||||
offset + inner_offset, 1ULL << vd->vdev_ashift)) {
|
||||
obsolete_bytes += 1ULL << vd->vdev_ashift;
|
||||
}
|
||||
@ -6904,10 +6895,10 @@ zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
|
||||
* not referenced, which is not a bug.
|
||||
*/
|
||||
if (vd->vdev_ops == &vdev_indirect_ops) {
|
||||
zfs_range_tree_vacate(msp->ms_allocatable,
|
||||
range_tree_vacate(msp->ms_allocatable,
|
||||
NULL, NULL);
|
||||
} else {
|
||||
zfs_range_tree_vacate(msp->ms_allocatable,
|
||||
range_tree_vacate(msp->ms_allocatable,
|
||||
zdb_leak, vd);
|
||||
}
|
||||
if (msp->ms_loaded) {
|
||||
@ -7805,7 +7796,7 @@ verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
|
||||
* their respective ms_allocateable trees should not contain them.
|
||||
*/
|
||||
mutex_enter(&ms->ms_lock);
|
||||
zfs_range_tree_verify_not_present(ms->ms_allocatable,
|
||||
range_tree_verify_not_present(ms->ms_allocatable,
|
||||
sme->sme_offset, sme->sme_run);
|
||||
mutex_exit(&ms->ms_lock);
|
||||
|
||||
@ -7956,9 +7947,8 @@ verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
|
||||
* This way we ensure that none of the blocks that
|
||||
* are part of the checkpoint were freed by mistake.
|
||||
*/
|
||||
zfs_range_tree_walk(ckpoint_msp->ms_allocatable,
|
||||
(zfs_range_tree_func_t *)
|
||||
zfs_range_tree_verify_not_present,
|
||||
range_tree_walk(ckpoint_msp->ms_allocatable,
|
||||
(range_tree_func_t *)range_tree_verify_not_present,
|
||||
current_msp->ms_allocatable);
|
||||
}
|
||||
}
|
||||
@ -8098,7 +8088,7 @@ static void
|
||||
mos_obj_refd(uint64_t obj)
|
||||
{
|
||||
if (obj != 0 && mos_refd_objs != NULL)
|
||||
zfs_range_tree_add(mos_refd_objs, obj, 1);
|
||||
range_tree_add(mos_refd_objs, obj, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -8108,8 +8098,8 @@ static void
|
||||
mos_obj_refd_multiple(uint64_t obj)
|
||||
{
|
||||
if (obj != 0 && mos_refd_objs != NULL &&
|
||||
!zfs_range_tree_contains(mos_refd_objs, obj, 1))
|
||||
zfs_range_tree_add(mos_refd_objs, obj, 1);
|
||||
!range_tree_contains(mos_refd_objs, obj, 1))
|
||||
range_tree_add(mos_refd_objs, obj, 1);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -8306,8 +8296,8 @@ dump_mos_leaks(spa_t *spa)
|
||||
*/
|
||||
uint64_t object = 0;
|
||||
while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
|
||||
if (zfs_range_tree_contains(mos_refd_objs, object, 1)) {
|
||||
zfs_range_tree_remove(mos_refd_objs, object, 1);
|
||||
if (range_tree_contains(mos_refd_objs, object, 1)) {
|
||||
range_tree_remove(mos_refd_objs, object, 1);
|
||||
} else {
|
||||
dmu_object_info_t doi;
|
||||
const char *name;
|
||||
@ -8325,11 +8315,11 @@ dump_mos_leaks(spa_t *spa)
|
||||
rv = 2;
|
||||
}
|
||||
}
|
||||
(void) zfs_range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
|
||||
if (!zfs_range_tree_is_empty(mos_refd_objs))
|
||||
(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
|
||||
if (!range_tree_is_empty(mos_refd_objs))
|
||||
rv = 2;
|
||||
zfs_range_tree_vacate(mos_refd_objs, NULL, NULL);
|
||||
zfs_range_tree_destroy(mos_refd_objs);
|
||||
range_tree_vacate(mos_refd_objs, NULL, NULL);
|
||||
range_tree_destroy(mos_refd_objs);
|
||||
return (rv);
|
||||
}
|
||||
|
||||
@ -8451,8 +8441,8 @@ dump_zpool(spa_t *spa)
|
||||
|
||||
if (dump_opt['d'] || dump_opt['i']) {
|
||||
spa_feature_t f;
|
||||
mos_refd_objs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
|
||||
NULL, 0, 0);
|
||||
mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
|
||||
0);
|
||||
dump_objset(dp->dp_meta_objset);
|
||||
|
||||
if (dump_opt['d'] >= 3) {
|
||||
@ -8983,7 +8973,7 @@ zdb_read_block(char *thing, spa_t *spa)
|
||||
|
||||
DVA_SET_VDEV(&dva[0], vd->vdev_id);
|
||||
DVA_SET_OFFSET(&dva[0], offset);
|
||||
DVA_SET_GANG(&dva[0], 0);
|
||||
DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
|
||||
DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
|
||||
|
||||
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
|
||||
@ -8998,7 +8988,7 @@ zdb_read_block(char *thing, spa_t *spa)
|
||||
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
|
||||
|
||||
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
|
||||
zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
|
||||
zio = zio_root(spa, NULL, NULL, 0);
|
||||
|
||||
if (vd == vd->vdev_top) {
|
||||
/*
|
||||
@ -9048,7 +9038,7 @@ zdb_read_block(char *thing, spa_t *spa)
|
||||
const blkptr_t *b = (const blkptr_t *)(void *)
|
||||
((uintptr_t)buf + (uintptr_t)blkptr_offset);
|
||||
if (zfs_blkptr_verify(spa, b,
|
||||
BLK_CONFIG_NEEDED, BLK_VERIFY_ONLY)) {
|
||||
BLK_CONFIG_NEEDED, BLK_VERIFY_ONLY) == B_FALSE) {
|
||||
abd_return_buf_copy(pabd, buf, lsize);
|
||||
borrowed = B_FALSE;
|
||||
buf = lbuf;
|
||||
@ -9057,7 +9047,7 @@ zdb_read_block(char *thing, spa_t *spa)
|
||||
b = (const blkptr_t *)(void *)
|
||||
((uintptr_t)buf + (uintptr_t)blkptr_offset);
|
||||
if (lsize == -1 || zfs_blkptr_verify(spa, b,
|
||||
BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
|
||||
BLK_CONFIG_NEEDED, BLK_VERIFY_LOG) == B_FALSE) {
|
||||
printf("invalid block pointer at this DVA\n");
|
||||
goto out;
|
||||
}
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*

@@ -215,7 +214,6 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
vdev_stat_t *vs;
char **lines = NULL;
int lines_cnt = 0;
int rc;

/*
* Get the persistent path, typically under the '/dev/disk/by-id' or

@@ -407,17 +405,17 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
}

nvlist_lookup_string(vdev, "new_devid", &new_devid);

if (is_mpath_wholedisk) {
/* Don't label device mapper or multipath disks. */
zed_log_msg(LOG_INFO,
" it's a multipath wholedisk, don't label");
rc = zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
&lines_cnt);
if (rc != 0) {
if (zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
&lines_cnt) != 0) {
zed_log_msg(LOG_INFO,
" zpool_prepare_disk: could not "
"prepare '%s' (%s), path '%s', rc = %d", fullpath,
libzfs_error_description(g_zfshdl), path, rc);
"prepare '%s' (%s)", fullpath,
libzfs_error_description(g_zfshdl));
if (lines_cnt > 0) {
zed_log_msg(LOG_INFO,
" zfs_prepare_disk output:");

@@ -448,13 +446,12 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
* If this is a request to label a whole disk, then attempt to
* write out the label.
*/
rc = zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
vdev, "autoreplace", &lines, &lines_cnt);
if (rc != 0) {
if (zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
vdev, "autoreplace", &lines, &lines_cnt) != 0) {
zed_log_msg(LOG_WARNING,
" zpool_prepare_and_label_disk: could not "
"label '%s' (%s), rc = %d", leafname,
libzfs_error_description(g_zfshdl), rc);
"label '%s' (%s)", leafname,
libzfs_error_description(g_zfshdl));
if (lines_cnt > 0) {
zed_log_msg(LOG_INFO,
" zfs_prepare_disk output:");
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*

@@ -404,7 +403,6 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
(state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
const char *devtype;
char *devname;
boolean_t skip_removal = B_FALSE;

if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
&devtype) == 0) {

@@ -442,28 +440,18 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c);

if (vs->vs_state == VDEV_STATE_OFFLINE)
return;

/*
* If state removed is requested for already removed vdev,
* its a loopback event from spa_async_remove(). Just
* ignore it.
*/
if ((vs->vs_state == VDEV_STATE_REMOVED &&
state == VDEV_STATE_REMOVED)) {
if (strcmp(class, "resource.fs.zfs.removed") == 0 &&
nvlist_exists(nvl, "by_kernel")) {
skip_removal = B_TRUE;
} else {
return;
}
}
if ((vs->vs_state == VDEV_STATE_REMOVED && state ==
VDEV_STATE_REMOVED) || vs->vs_state == VDEV_STATE_OFFLINE)
return;

/* Remove the vdev since device is unplugged */
int remove_status = 0;
if (!skip_removal && (l2arc ||
(strcmp(class, "resource.fs.zfs.removed") == 0))) {
if (l2arc || (strcmp(class, "resource.fs.zfs.removed") == 0)) {
remove_status = zpool_vdev_remove_wanted(zhp, devname);
fmd_hdl_debug(hdl, "zpool_vdev_remove_wanted '%s'"
", err:%d", devname, libzfs_errno(zhdl));
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* This file is part of the ZFS Event Daemon (ZED).
*

@@ -1,5 +1,4 @@
#!/bin/sh
# SPDX-License-Identifier: CDDL-1.0
# shellcheck disable=SC2154
#
# CDDL HEADER START

@@ -283,11 +283,6 @@ zed_notify_email()
if [ "${ZED_EMAIL_OPTS%@SUBJECT@*}" = "${ZED_EMAIL_OPTS}" ] ; then
# inject subject header
printf "Subject: %s\n" "${subject}"
# The following empty line is needed to separate the header from the
# body of the message. Otherwise programs like sendmail will skip
# everything up to the first empty line (or wont send an email at
# all) and will still exit with exit code 0
printf "\n"
fi
# output message
cat "${pathname}"
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* This file is part of the ZFS Event Daemon (ZED).
*

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*

@@ -37,7 +36,6 @@
#include <assert.h>
#include <ctype.h>
#include <sys/debug.h>
#include <dirent.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>

@@ -122,7 +120,6 @@ static int zfs_do_change_key(int argc, char **argv);
static int zfs_do_project(int argc, char **argv);
static int zfs_do_version(int argc, char **argv);
static int zfs_do_redact(int argc, char **argv);
static int zfs_do_rewrite(int argc, char **argv);
static int zfs_do_wait(int argc, char **argv);

#ifdef __FreeBSD__

@@ -195,7 +192,6 @@ typedef enum {
HELP_CHANGE_KEY,
HELP_VERSION,
HELP_REDACT,
HELP_REWRITE,
HELP_JAIL,
HELP_UNJAIL,
HELP_WAIT,

@@ -230,7 +226,7 @@ static zfs_command_t command_table[] = {
{ "promote", zfs_do_promote, HELP_PROMOTE },
{ "rename", zfs_do_rename, HELP_RENAME },
{ "bookmark", zfs_do_bookmark, HELP_BOOKMARK },
{ "diff", zfs_do_diff, HELP_DIFF },
{ "program", zfs_do_channel_program, HELP_CHANNEL_PROGRAM },
{ NULL },
{ "list", zfs_do_list, HELP_LIST },
{ NULL },

@@ -252,31 +248,27 @@ static zfs_command_t command_table[] = {
{ NULL },
{ "send", zfs_do_send, HELP_SEND },
{ "receive", zfs_do_receive, HELP_RECEIVE },
{ "redact", zfs_do_redact, HELP_REDACT },
{ NULL },
{ "allow", zfs_do_allow, HELP_ALLOW },
{ NULL },
{ "unallow", zfs_do_unallow, HELP_UNALLOW },
{ NULL },
{ "hold", zfs_do_hold, HELP_HOLD },
{ "holds", zfs_do_holds, HELP_HOLDS },
{ "release", zfs_do_release, HELP_RELEASE },
{ NULL },
{ "diff", zfs_do_diff, HELP_DIFF },
{ "load-key", zfs_do_load_key, HELP_LOAD_KEY },
{ "unload-key", zfs_do_unload_key, HELP_UNLOAD_KEY },
{ "change-key", zfs_do_change_key, HELP_CHANGE_KEY },
{ NULL },
{ "program", zfs_do_channel_program, HELP_CHANNEL_PROGRAM },
{ "rewrite", zfs_do_rewrite, HELP_REWRITE },
{ "redact", zfs_do_redact, HELP_REDACT },
{ "wait", zfs_do_wait, HELP_WAIT },

#ifdef __FreeBSD__
{ NULL },
{ "jail", zfs_do_jail, HELP_JAIL },
{ "unjail", zfs_do_unjail, HELP_UNJAIL },
#endif

#ifdef __linux__
{ NULL },
{ "zone", zfs_do_zone, HELP_ZONE },
{ "unzone", zfs_do_unzone, HELP_UNZONE },
#endif

@@ -439,9 +431,6 @@ get_usage(zfs_help_t idx)
case HELP_REDACT:
return (gettext("\tredact <snapshot> <bookmark> "
"<redaction_snapshot> ...\n"));
case HELP_REWRITE:
return (gettext("\trewrite [-rvx] [-o <offset>] [-l <length>] "
"<directory|file ...>\n"));
case HELP_JAIL:
return (gettext("\tjail <jailid|jailname> <filesystem>\n"));
case HELP_UNJAIL:

@@ -2996,8 +2985,7 @@ us_type2str(unsigned field_type)
}

static int
userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space,
uint64_t default_quota)
userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
{
us_cbdata_t *cb = (us_cbdata_t *)arg;
zfs_userquota_prop_t prop = cb->cb_prop;

@@ -3153,7 +3141,7 @@ userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space,
prop == ZFS_PROP_PROJECTUSED) {
propname = "used";
if (!nvlist_exists(props, "quota"))
(void) nvlist_add_uint64(props, "quota", default_quota);
(void) nvlist_add_uint64(props, "quota", 0);
} else if (prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTQUOTA) {
propname = "quota";

@@ -3162,10 +3150,8 @@ userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space,
} else if (prop == ZFS_PROP_USEROBJUSED ||
prop == ZFS_PROP_GROUPOBJUSED || prop == ZFS_PROP_PROJECTOBJUSED) {
propname = "objused";
if (!nvlist_exists(props, "objquota")) {
(void) nvlist_add_uint64(props, "objquota",
default_quota);
}
if (!nvlist_exists(props, "objquota"))
(void) nvlist_add_uint64(props, "objquota", 0);
} else if (prop == ZFS_PROP_USEROBJQUOTA ||
prop == ZFS_PROP_GROUPOBJQUOTA ||
prop == ZFS_PROP_PROJECTOBJQUOTA) {

@@ -4453,7 +4439,7 @@ zfs_do_rollback(int argc, char **argv)
if (cb.cb_create > 0)
min_txg = cb.cb_create;

if ((ret = zfs_iter_snapshots_sorted_v2(zhp, 0, rollback_check, &cb,
if ((ret = zfs_iter_snapshots_v2(zhp, 0, rollback_check, &cb,
min_txg, 0)) != 0)
goto out;
if ((ret = zfs_iter_bookmarks_v2(zhp, 0, rollback_check, &cb)) != 0)

@@ -5306,7 +5292,6 @@ zfs_do_receive(int argc, char **argv)
#define ZFS_DELEG_PERM_SHARE "share"
#define ZFS_DELEG_PERM_SEND "send"
#define ZFS_DELEG_PERM_RECEIVE "receive"
#define ZFS_DELEG_PERM_RECEIVE_APPEND "receive:append"
#define ZFS_DELEG_PERM_ALLOW "allow"
#define ZFS_DELEG_PERM_USERPROP "userprop"
#define ZFS_DELEG_PERM_VSCAN "vscan" /* ??? */
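The command-table hunks above reorder how the zfs front end maps subcommand names (including the new "rewrite" entry) onto handler functions, with NULL entries acting only as usage-message separators. As a hedged illustration of that general dispatch pattern, and not the exact zfs_main.c code, a minimal standalone sketch in C might look like this (all names here are illustrative):

#include <stdio.h>
#include <string.h>

typedef struct demo_command {
	const char *name;		/* NULL marks a usage-grouping gap */
	int (*func)(int argc, char **argv);
} demo_command_t;

static int demo_list(int argc, char **argv) { (void) argc; (void) argv; return (0); }
static int demo_rewrite(int argc, char **argv) { (void) argc; (void) argv; return (0); }

static demo_command_t demo_table[] = {
	{ "list", demo_list },
	{ NULL, NULL },
	{ "rewrite", demo_rewrite },
};

/* Linear scan, skipping the NULL separator rows. */
static int
demo_dispatch(const char *cmd, int argc, char **argv)
{
	for (size_t i = 0; i < sizeof (demo_table) / sizeof (demo_table[0]); i++) {
		if (demo_table[i].name != NULL &&
		    strcmp(cmd, demo_table[i].name) == 0)
			return (demo_table[i].func(argc, argv));
	}
	fprintf(stderr, "unknown command: %s\n", cmd);
	return (2);
}

int
main(int argc, char **argv)
{
	return (argc > 1 ? demo_dispatch(argv[1], argc - 1, argv + 1) : 2);
}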
@@ -9026,192 +9011,6 @@ zfs_do_project(int argc, char **argv)
return (ret);
}

static int
zfs_rewrite_file(const char *path, boolean_t verbose, zfs_rewrite_args_t *args)
{
int fd, ret = 0;

fd = open(path, O_WRONLY);
if (fd < 0) {
ret = errno;
(void) fprintf(stderr, gettext("failed to open %s: %s\n"),
path, strerror(errno));
return (ret);
}

if (ioctl(fd, ZFS_IOC_REWRITE, args) < 0) {
ret = errno;
(void) fprintf(stderr, gettext("failed to rewrite %s: %s\n"),
path, strerror(errno));
} else if (verbose) {
printf("%s\n", path);
}

close(fd);
return (ret);
}

static int
zfs_rewrite_dir(const char *path, boolean_t verbose, boolean_t xdev, dev_t dev,
zfs_rewrite_args_t *args, nvlist_t *dirs)
{
struct dirent *ent;
DIR *dir;
int ret = 0, err;

dir = opendir(path);
if (dir == NULL) {
if (errno == ENOENT)
return (0);
ret = errno;
(void) fprintf(stderr, gettext("failed to opendir %s: %s\n"),
path, strerror(errno));
return (ret);
}

size_t plen = strlen(path) + 1;
while ((ent = readdir(dir)) != NULL) {
char *fullname;
struct stat st;

if (ent->d_type != DT_REG && ent->d_type != DT_DIR)
continue;

if (strcmp(ent->d_name, ".") == 0 ||
strcmp(ent->d_name, "..") == 0)
continue;

if (plen + strlen(ent->d_name) >= PATH_MAX) {
(void) fprintf(stderr, gettext("path too long %s/%s\n"),
path, ent->d_name);
ret = ENAMETOOLONG;
continue;
}

if (asprintf(&fullname, "%s/%s", path, ent->d_name) == -1) {
(void) fprintf(stderr,
gettext("failed to allocate memory\n"));
ret = ENOMEM;
continue;
}

if (xdev) {
if (lstat(fullname, &st) < 0) {
ret = errno;
(void) fprintf(stderr,
gettext("failed to stat %s: %s\n"),
fullname, strerror(errno));
free(fullname);
continue;
}
if (st.st_dev != dev) {
free(fullname);
continue;
}
}

if (ent->d_type == DT_REG) {
err = zfs_rewrite_file(fullname, verbose, args);
if (err)
ret = err;
} else { /* DT_DIR */
fnvlist_add_uint64(dirs, fullname, dev);
}

free(fullname);
}

closedir(dir);
return (ret);
}

static int
zfs_rewrite_path(const char *path, boolean_t verbose, boolean_t recurse,
boolean_t xdev, zfs_rewrite_args_t *args, nvlist_t *dirs)
{
struct stat st;
int ret = 0;

if (lstat(path, &st) < 0) {
ret = errno;
(void) fprintf(stderr, gettext("failed to stat %s: %s\n"),
path, strerror(errno));
return (ret);
}

if (S_ISREG(st.st_mode)) {
ret = zfs_rewrite_file(path, verbose, args);
} else if (S_ISDIR(st.st_mode) && recurse) {
ret = zfs_rewrite_dir(path, verbose, xdev, st.st_dev, args,
dirs);
}
return (ret);
}

static int
zfs_do_rewrite(int argc, char **argv)
{
int ret = 0, err, c;
boolean_t recurse = B_FALSE, verbose = B_FALSE, xdev = B_FALSE;

if (argc < 2)
usage(B_FALSE);

zfs_rewrite_args_t args;
memset(&args, 0, sizeof (args));

while ((c = getopt(argc, argv, "l:o:rvx")) != -1) {
switch (c) {
case 'l':
args.len = strtoll(optarg, NULL, 0);
break;
case 'o':
args.off = strtoll(optarg, NULL, 0);
break;
case 'r':
recurse = B_TRUE;
break;
case 'v':
verbose = B_TRUE;
break;
case 'x':
xdev = B_TRUE;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}

argv += optind;
argc -= optind;
if (argc == 0) {
(void) fprintf(stderr,
gettext("missing file or directory target(s)\n"));
usage(B_FALSE);
}

nvlist_t *dirs = fnvlist_alloc();
for (int i = 0; i < argc; i++) {
err = zfs_rewrite_path(argv[i], verbose, recurse, xdev, &args,
dirs);
if (err)
ret = err;
}
nvpair_t *dir;
while ((dir = nvlist_next_nvpair(dirs, NULL)) != NULL) {
err = zfs_rewrite_dir(nvpair_name(dir), verbose, xdev,
fnvpair_value_uint64(dir), &args, dirs);
if (err)
ret = err;
fnvlist_remove_nvpair(dirs, dir);
}
fnvlist_free(dirs);

return (ret);
}

static int
zfs_do_wait(int argc, char **argv)
{
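Note how zfs_do_rewrite() above avoids deep recursion: subdirectories are queued in an nvlist and drained iteratively. A minimal standalone sketch of that queue pattern using libnvpair is shown below; the paths are hypothetical and it assumes a userland build linked against libnvpair, so treat it as an illustration rather than an excerpt from the source.

#include <libnvpair.h>
#include <stdio.h>

int
main(void)
{
	nvlist_t *dirs = fnvlist_alloc();
	nvpair_t *dir;

	/* Seed the work list, as zfs_rewrite_path() does for directories. */
	fnvlist_add_uint64(dirs, "/tank/a", 0);
	fnvlist_add_uint64(dirs, "/tank/b", 0);

	/* Drain iteratively; a real traversal would enqueue subdirs here. */
	while ((dir = nvlist_next_nvpair(dirs, NULL)) != NULL) {
		printf("visit %s (dev %llu)\n", nvpair_name(dir),
		    (unsigned long long)fnvpair_value_uint64(dir));
		fnvlist_remove_nvpair(dirs, dir);
	}

	fnvlist_free(dirs);
	return (0);
}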
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*

@@ -1,5 +1,4 @@
#!/usr/bin/env @PYTHON_SHEBANG@
# SPDX-License-Identifier: CDDL-1.0
#
# Print out statistics for all zil stats. This information is
# available through the zil kstat.

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*

@@ -23,7 +22,7 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2023-2025, Klara, Inc.
* Copyright (c) 2023-2024, Klara Inc.
*/

/*

@@ -243,36 +242,6 @@ err_to_str(int err)
return ("[unknown]");
}

static const char *const iotypestrtable[ZINJECT_IOTYPES] = {
[ZINJECT_IOTYPE_NULL] = "null",
[ZINJECT_IOTYPE_READ] = "read",
[ZINJECT_IOTYPE_WRITE] = "write",
[ZINJECT_IOTYPE_FREE] = "free",
[ZINJECT_IOTYPE_CLAIM] = "claim",
[ZINJECT_IOTYPE_FLUSH] = "flush",
[ZINJECT_IOTYPE_TRIM] = "trim",
[ZINJECT_IOTYPE_ALL] = "all",
[ZINJECT_IOTYPE_PROBE] = "probe",
};

static zinject_iotype_t
str_to_iotype(const char *arg)
{
for (uint_t iotype = 0; iotype < ZINJECT_IOTYPES; iotype++)
if (iotypestrtable[iotype] != NULL &&
strcasecmp(iotypestrtable[iotype], arg) == 0)
return (iotype);
return (ZINJECT_IOTYPES);
}

static const char *
iotype_to_str(zinject_iotype_t iotype)
{
if (iotype >= ZINJECT_IOTYPES || iotypestrtable[iotype] == NULL)
return ("[unknown]");
return (iotypestrtable[iotype]);
}

/*
* Print usage message.
*/

@@ -435,30 +404,26 @@ print_data_handler(int id, const char *pool, zinject_record_t *record,

if (*count == 0) {
(void) printf("%3s %-15s %-6s %-6s %-8s %3s %-4s "
"%-15s %-6s %-15s\n", "ID", "POOL", "OBJSET", "OBJECT",
"TYPE", "LVL", "DVAs", "RANGE", "MATCH", "INJECT");
"%-15s\n", "ID", "POOL", "OBJSET", "OBJECT", "TYPE",
"LVL", "DVAs", "RANGE");
(void) printf("--- --------------- ------ "
"------ -------- --- ---- --------------- "
"------ ------\n");
"------ -------- --- ---- ---------------\n");
}

*count += 1;

char rangebuf[32];
if (record->zi_start == 0 && record->zi_end == -1ULL)
snprintf(rangebuf, sizeof (rangebuf), "all");
else
snprintf(rangebuf, sizeof (rangebuf), "[%llu, %llu]",
(u_longlong_t)record->zi_start,
(u_longlong_t)record->zi_end);

(void) printf("%3d %-15s %-6llu %-6llu %-8s %-3d 0x%02x %-15s "
"%6" PRIu64 " %6" PRIu64 "\n", id, pool,
(u_longlong_t)record->zi_objset,
(void) printf("%3d %-15s %-6llu %-6llu %-8s %-3d 0x%02x ",
id, pool, (u_longlong_t)record->zi_objset,
(u_longlong_t)record->zi_object, type_to_name(record->zi_type),
record->zi_level, record->zi_dvas, rangebuf,
record->zi_match_count, record->zi_inject_count);
record->zi_level, record->zi_dvas);

if (record->zi_start == 0 &&
record->zi_end == -1ULL)
(void) printf("all\n");
else
(void) printf("[%llu, %llu]\n", (u_longlong_t)record->zi_start,
(u_longlong_t)record->zi_end);

return (0);
}

@@ -467,6 +432,10 @@ static int
print_device_handler(int id, const char *pool, zinject_record_t *record,
void *data)
{
static const char *iotypestr[] = {
"null", "read", "write", "free", "claim", "flush", "trim", "all",
};

int *count = data;

if (record->zi_guid == 0 || record->zi_func[0] != '\0')

@@ -476,14 +445,11 @@ print_device_handler(int id, const char *pool, zinject_record_t *record,
return (0);

if (*count == 0) {
(void) printf("%3s %-15s %-16s %-5s %-10s %-9s "
"%-6s %-6s\n",
"ID", "POOL", "GUID", "TYPE", "ERROR", "FREQ",
"MATCH", "INJECT");
(void) printf("%3s %-15s %-16s %-5s %-10s %-9s\n",
"ID", "POOL", "GUID", "TYPE", "ERROR", "FREQ");
(void) printf(
"--- --------------- ---------------- "
"----- ---------- --------- "
"------ ------\n");
"----- ---------- ---------\n");
}

*count += 1;

@@ -491,11 +457,9 @@ print_device_handler(int id, const char *pool, zinject_record_t *record,
double freq = record->zi_freq == 0 ? 100.0f :
(((double)record->zi_freq) / ZI_PERCENTAGE_MAX) * 100.0f;

(void) printf("%3d %-15s %llx %-5s %-10s %8.4f%% "
"%6" PRIu64 " %6" PRIu64 "\n", id, pool,
(u_longlong_t)record->zi_guid,
iotype_to_str(record->zi_iotype), err_to_str(record->zi_error),
freq, record->zi_match_count, record->zi_inject_count);
(void) printf("%3d %-15s %llx %-5s %-10s %8.4f%%\n", id, pool,
(u_longlong_t)record->zi_guid, iotypestr[record->zi_iotype],
err_to_str(record->zi_error), freq);

return (0);
}

@@ -513,26 +477,18 @@ print_delay_handler(int id, const char *pool, zinject_record_t *record,
return (0);

if (*count == 0) {
(void) printf("%3s %-15s %-16s %-10s %-5s %-9s "
"%-6s %-6s\n",
"ID", "POOL", "GUID", "DELAY (ms)", "LANES", "FREQ",
"MATCH", "INJECT");
(void) printf("--- --------------- ---------------- "
"---------- ----- --------- "
"------ ------\n");
(void) printf("%3s %-15s %-15s %-15s %s\n",
"ID", "POOL", "DELAY (ms)", "LANES", "GUID");
(void) printf("--- --------------- --------------- "
"--------------- ----------------\n");
}

*count += 1;

double freq = record->zi_freq == 0 ? 100.0f :
(((double)record->zi_freq) / ZI_PERCENTAGE_MAX) * 100.0f;

(void) printf("%3d %-15s %llx %10llu %5llu %8.4f%% "
"%6" PRIu64 " %6" PRIu64 "\n", id, pool,
(u_longlong_t)record->zi_guid,
(void) printf("%3d %-15s %-15llu %-15llu %llx\n", id, pool,
(u_longlong_t)NSEC2MSEC(record->zi_timer),
(u_longlong_t)record->zi_nlanes,
freq, record->zi_match_count, record->zi_inject_count);
(u_longlong_t)record->zi_guid);

return (0);
}

@@ -896,7 +852,7 @@ main(int argc, char **argv)
int quiet = 0;
int error = 0;
int domount = 0;
int io_type = ZINJECT_IOTYPE_ALL;
int io_type = ZIO_TYPES;
int action = VDEV_STATE_UNKNOWN;
err_type_t type = TYPE_INVAL;
err_type_t label = TYPE_INVAL;

@@ -1090,8 +1046,19 @@ main(int argc, char **argv)
}
break;
case 'T':
io_type = str_to_iotype(optarg);
if (io_type == ZINJECT_IOTYPES) {
if (strcasecmp(optarg, "read") == 0) {
io_type = ZIO_TYPE_READ;
} else if (strcasecmp(optarg, "write") == 0) {
io_type = ZIO_TYPE_WRITE;
} else if (strcasecmp(optarg, "free") == 0) {
io_type = ZIO_TYPE_FREE;
} else if (strcasecmp(optarg, "claim") == 0) {
io_type = ZIO_TYPE_CLAIM;
} else if (strcasecmp(optarg, "flush") == 0) {
io_type = ZIO_TYPE_FLUSH;
} else if (strcasecmp(optarg, "all") == 0) {
io_type = ZIO_TYPES;
} else {
(void) fprintf(stderr, "invalid I/O type "
"'%s': must be 'read', 'write', 'free', "
"'claim', 'flush' or 'all'\n", optarg);

@@ -1213,7 +1180,7 @@ main(int argc, char **argv)
}

if (error == EILSEQ &&
(record.zi_freq == 0 || io_type != ZINJECT_IOTYPE_READ)) {
(record.zi_freq == 0 || io_type != ZIO_TYPE_READ)) {
(void) fprintf(stderr, "device corrupt errors require "
"io type read and a frequency value\n");
libzfs_fini(g_zfs);

@@ -1228,9 +1195,9 @@ main(int argc, char **argv)

if (record.zi_nlanes) {
switch (io_type) {
case ZINJECT_IOTYPE_READ:
case ZINJECT_IOTYPE_WRITE:
case ZINJECT_IOTYPE_ALL:
case ZIO_TYPE_READ:
case ZIO_TYPE_WRITE:
case ZIO_TYPES:
break;
default:
(void) fprintf(stderr, "I/O type for a delay "

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*

@@ -88,8 +87,7 @@
typedef struct vdev_disk_db_entry
{
/* 24 byte name + 1 byte NULL terminator to make GCC happy */
char id[25];
char id[24];
int sector_size;
} vdev_disk_db_entry_t;

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*
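One side of the zinject hunks above replaces an if/else chain with a designated-initializer string table walked by strcasecmp() (str_to_iotype()/iotype_to_str()). The sketch below shows that table pattern in isolation, with an illustrative enum rather than the real zinject_iotype_t values, so it can be compiled on its own.

#include <stdio.h>
#include <strings.h>

typedef enum { IOT_READ, IOT_WRITE, IOT_ALL, IOT_COUNT } demo_iotype_t;

static const char *const demo_iotype_names[IOT_COUNT] = {
	[IOT_READ] = "read",
	[IOT_WRITE] = "write",
	[IOT_ALL] = "all",
};

/* Return IOT_COUNT for an unknown name, mirroring the style in the diff. */
static demo_iotype_t
demo_name_to_iotype(const char *arg)
{
	for (unsigned int i = 0; i < IOT_COUNT; i++) {
		if (demo_iotype_names[i] != NULL &&
		    strcasecmp(demo_iotype_names[i], arg) == 0)
			return ((demo_iotype_t)i);
	}
	return (IOT_COUNT);
}

int
main(void)
{
	printf("%d\n", demo_name_to_iotype("WRITE"));	/* prints 1 */
	return (0);
}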
@@ -521,11 +520,11 @@ get_usage(zpool_help_t idx)
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
"[<device> ...]\n"));
case HELP_STATUS:
return (gettext("\tstatus [-DdegiLPpstvx] "
"[-c script1[,script2,...]] ...\n"
"\t [-j|--json [--json-flat-vdevs] [--json-int] "
"[--json-pool-key-guid]] ...\n"
"\t [-T d|u] [--power] [pool] [interval [count]]\n"));
return (gettext("\tstatus [--power] [-j [--json-int, "
"--json-flat-vdevs, ...\n"
"\t --json-pool-key-guid]] [-c [script1,script2,...]] "
"[-dDegiLpPstvx] ...\n"
"\t [-T d|u] [pool] [interval [count]]\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade\n"
"\tupgrade -v\n"

@@ -8482,19 +8481,19 @@ zpool_do_scrub(int argc, char **argv)

if (is_pause && is_stop) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -s and -p are mutually exclusive\n"));
"combination :-s and -p are mutually exclusive\n"));
usage(B_FALSE);
} else if (is_pause && is_txg_continue) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -p and -C are mutually exclusive\n"));
"combination :-p and -C are mutually exclusive\n"));
usage(B_FALSE);
} else if (is_stop && is_txg_continue) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -s and -C are mutually exclusive\n"));
"combination :-s and -C are mutually exclusive\n"));
usage(B_FALSE);
} else if (is_error_scrub && is_txg_continue) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -e and -C are mutually exclusive\n"));
"combination :-e and -C are mutually exclusive\n"));
usage(B_FALSE);
} else {
if (is_error_scrub)

@@ -10432,9 +10431,10 @@ print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
break;

case ZPOOL_STATUS_REMOVED_DEV:
snprintf(status, ST_SIZE, gettext("One or more devices have "
"been removed.\n\tSufficient replicas exist for the pool "
"to continue functioning in a\n\tdegraded state.\n"));
snprintf(status, ST_SIZE, gettext("One or more devices has "
"been removed by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
snprintf(action, AC_SIZE, gettext("Online the device "
"using zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));

@@ -10979,30 +10979,28 @@ status_callback(zpool_handle_t *zhp, void *data)
}

/*
* zpool status [-dDegiLpPstvx] [-c [script1,script2,...]] ...
* [-j|--json [--json-flat-vdevs] [--json-int] ...
* [--json-pool-key-guid]] [--power] [-T d|u] ...
* [pool] [interval [count]]
* zpool status [-c [script1,script2,...]] [-dDegiLpPstvx] [--power] ...
* [-T d|u] [pool] [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -D Display dedup status (undocumented)
* -d Display Direct I/O write verify errors
* -D Display dedup status (undocumented)
* -e Display only unhealthy vdevs
* -g Display guid for individual vdev name.
* -i Display vdev initialization status.
* -j [...] Display output in JSON format
* --json-flat-vdevs Display vdevs in flat hierarchy
* --json-int Display numbers in integer format instead of string
* --json-pool-key-guid Use pool GUID as key for pool objects
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
* -p Display values in parsable (exact) format.
* --power Display vdev enclosure slot power status
* -P Display full path for vdev name.
* -s Display slow IOs column.
* -T Display a timestamp in date(1) or Unix format
* -t Display vdev TRIM status.
* -T Display a timestamp in date(1) or Unix format
* -v Display complete error logs
* -x Display only pools with potential problems
* -j Display output in JSON format
* --power Display vdev enclosure slot power status
* --json-int Display numbers in inteeger format instead of string
* --json-flat-vdevs Display vdevs in flat hierarchy
* --json-pool-key-guid Use pool GUID as key for pool objects
*
* Describes the health status of all pools or some subset.
*/

@@ -12069,11 +12067,6 @@ zpool_do_events_nvprint(nvlist_t *nvl, int depth)
zfs_valstr_zio_stage(i32, flagstr,
sizeof (flagstr));
printf(gettext("0x%x [%s]"), i32, flagstr);
} else if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_TYPE) == 0) {
zfs_valstr_zio_type(i32, flagstr,
sizeof (flagstr));
printf(gettext("0x%x [%s]"), i32, flagstr);
} else if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
zfs_valstr_zio_priority(i32, flagstr,

@@ -12753,13 +12746,11 @@ found:

if (strcmp(argv[1], "root") == 0)
vdev = strdup("root-0");
else
vdev = strdup(argv[1]);

/* ... and the rest are vdev names */
if (vdev == NULL)
cb.cb_vdevs.cb_names = argv + 1;
else
cb.cb_vdevs.cb_names = &vdev;

cb.cb_vdevs.cb_names = &vdev;
cb.cb_vdevs.cb_names_count = argc - 1;
cb.cb_type = ZFS_TYPE_VDEV;
argc = 1; /* One pool to process */
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* Gather top-level ZFS pool and resilver/scan statistics and print using
* influxdb line protocol

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*
cmd/ztest.c (75 changes)

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*

@@ -1812,11 +1811,10 @@ ztest_zd_fini(ztest_ds_t *zd)
ztest_rll_destroy(&zd->zd_range_lock[l]);
}

#define DMU_TX_MIGHTWAIT \
(ztest_random(10) == 0 ? DMU_TX_NOWAIT : DMU_TX_WAIT)
#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)

static uint64_t
ztest_tx_assign(dmu_tx_t *tx, dmu_tx_flag_t txg_how, const char *tag)
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
uint64_t txg;
int error;

@@ -1827,12 +1825,11 @@ ztest_tx_assign(dmu_tx_t *tx, dmu_tx_flag_t txg_how, const char *tag)
error = dmu_tx_assign(tx, txg_how);
if (error) {
if (error == ERESTART) {
ASSERT3U(txg_how, ==, DMU_TX_NOWAIT);
ASSERT3U(txg_how, ==, TXG_NOWAIT);
dmu_tx_wait(tx);
} else if (error == ENOSPC) {
ztest_record_enospc(tag);
} else {
ASSERT(error == EDQUOT || error == EIO);
ASSERT3U(error, ==, ENOSPC);
ztest_record_enospc(tag);
}
dmu_tx_abort(tx);
return (0);

@@ -1994,8 +1991,7 @@ ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)

if (write_state == WR_COPIED &&
dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH |
DMU_KEEP_CACHING) != 0) {
((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
write_state = WR_NEED_COPY;

@@ -2076,7 +2072,7 @@ ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap)
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
}

txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0)
return (ENOSPC);

@@ -2166,7 +2162,7 @@ ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);

txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_object_unlock(zd, object);
return (ENOSPC);

@@ -2248,7 +2244,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
P2PHASE(offset, length) == 0)
abuf = dmu_request_arcbuf(db, length);

txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);

@@ -2267,19 +2263,19 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
ASSERT(doi.doi_data_block_size);
ASSERT0(offset % doi.doi_data_block_size);
if (ztest_random(4) != 0) {
dmu_flags_t flags = ztest_random(2) ?
int prefetch = ztest_random(2) ?
DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;

/*
* We will randomly set when to do O_DIRECT on a read.
*/
if (ztest_random(4) == 0)
flags |= DMU_DIRECTIO;
prefetch |= DMU_DIRECTIO;

ztest_block_tag_t rbt;

VERIFY(dmu_read(os, lr->lr_foid, offset,
sizeof (rbt), &rbt, flags) == 0);
sizeof (rbt), &rbt, prefetch) == 0);
if (rbt.bt_magic == BT_MAGIC) {
ztest_bt_verify(&rbt, os, lr->lr_foid, 0,
offset, gen, txg, crtxg);

@@ -2310,7 +2306,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
dmu_write(os, lr->lr_foid, offset, length, data, tx);
} else {
memcpy(abuf->b_data, data, length);
VERIFY0(dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx, 0));
VERIFY0(dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx));
}

(void) ztest_log_write(zd, tx, lr);

@@ -2346,7 +2342,7 @@ ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)

dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);

txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);

@@ -2387,7 +2383,7 @@ ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, lr->lr_foid);

txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, lr->lr_foid);

@@ -2535,7 +2531,7 @@ ztest_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
object, offset, size, ZTRL_READER);

error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH | DMU_KEEP_CACHING);
DMU_READ_NO_PREFETCH);
ASSERT0(error);
} else {
ASSERT3P(zio, !=, NULL);

@@ -2551,6 +2547,7 @@ ztest_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
object, offset, size, ZTRL_READER);

error = dmu_buf_hold_noread(os, object, offset, zgd, &db);

if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;

@@ -2804,7 +2801,7 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)

dmu_tx_hold_write(tx, object, offset, size);

txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);

if (txg != 0) {
dmu_prealloc(os, object, offset, size, tx);

@@ -2827,7 +2824,7 @@ ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
enum ztest_io_type io_type;
uint64_t blocksize;
void *data;
dmu_flags_t dmu_read_flags = DMU_READ_NO_PREFETCH;
uint32_t dmu_read_flags = DMU_READ_NO_PREFETCH;

/*
* We will randomly set when to do O_DIRECT on a read.

@@ -4917,7 +4914,7 @@ ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
fatal(B_FALSE, "dmu_take_snapshot(%s) = %d", snap1name, error);
}

error = dsl_dataset_clone(clone1name, snap1name);
error = dmu_objset_clone(clone1name, snap1name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);

@@ -4944,7 +4941,7 @@ ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
fatal(B_FALSE, "dmu_open_snapshot(%s) = %d", snap3name, error);
}

error = dsl_dataset_clone(clone2name, snap3name);
error = dmu_objset_clone(clone2name, snap3name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);

@@ -5066,7 +5063,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
uint64_t stride = 123456789ULL;
uint64_t width = 40;
int free_percent = 5;
dmu_flags_t dmu_read_flags = DMU_READ_PREFETCH;
uint32_t dmu_read_flags = DMU_READ_PREFETCH;

/*
* We will randomly set when to do O_DIRECT on a read.

@@ -5172,7 +5169,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
/* This accounts for setting the checksum/compression. */
dmu_tx_hold_bonus(tx, bigobj);

txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);

@@ -5473,7 +5470,7 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
dmu_tx_hold_write(tx, packobj, packoff, packsize);
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);

txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);

@@ -5542,13 +5539,13 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
}
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off, bigbuf_arcbufs[j], tx, 0));
off, bigbuf_arcbufs[j], tx));
} else {
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off, bigbuf_arcbufs[2 * j], tx, 0));
off, bigbuf_arcbufs[2 * j], tx));
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off + chunksize / 2,
bigbuf_arcbufs[2 * j + 1], tx, 0));
bigbuf_arcbufs[2 * j + 1], tx));
}
if (i == 1) {
dmu_buf_rele(dbt, FTAG);

@@ -5693,7 +5690,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
*/
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
for (i = 0; i < 2; i++) {

@@ -5761,7 +5758,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
*/
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;

@@ -5795,7 +5792,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)

tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
VERIFY0(zap_remove(os, object, txgname, tx));

@@ -5838,7 +5835,7 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id)

tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, name);
txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
error = zap_add(os, object, name, sizeof (uint64_t), 1,

@@ -5909,7 +5906,7 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
if (i >= 2) {
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(od, sizeof (ztest_od_t));
return;

@@ -6075,7 +6072,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
error = -1;

if (!error)
error = dmu_tx_assign(tx, DMU_TX_NOWAIT);
error = dmu_tx_assign(tx, TXG_NOWAIT);

txg = error ? 0 : dmu_tx_get_txg(tx);

@@ -6335,13 +6332,13 @@ ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
fatal(B_FALSE, "dmu_objset_snapshot(%s) = %d", fullname, error);
}

error = dsl_dataset_clone(clonename, fullname);
error = dmu_objset_clone(clonename, fullname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dsl_dataset_clone");
ztest_record_enospc("dmu_objset_clone");
goto out;
}
fatal(B_FALSE, "dsl_dataset_clone(%s) = %d", clonename, error);
fatal(B_FALSE, "dmu_objset_clone(%s) = %d", clonename, error);
}

error = dsl_destroy_snapshot(fullname, B_TRUE);
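Most of the ztest hunks above only swap the transaction-assign flag names (TXG_WAIT/TXG_NOWAIT/TXG_MIGHTWAIT on one side, DMU_TX_WAIT/DMU_TX_NOWAIT/DMU_TX_MIGHTWAIT on the other); the retry idiom they exercise is unchanged. Below is a hedged sketch of that DMU idiom in C, using the older TXG_* names from this compare. It targets kernel/libzpool context (objset_t, dmu_tx_t), so it is an illustration of the pattern rather than a drop-in ztest excerpt.

#include <sys/dmu.h>

/* Assumed helper, not from the source: write one block with assign/retry. */
static int
demo_write_block(objset_t *os, uint64_t object, uint64_t off, uint64_t size,
    const void *data)
{
	for (;;) {
		dmu_tx_t *tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, object, off, size);

		int error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error == 0) {
			dmu_write(os, object, off, size, data, tx);
			dmu_tx_commit(tx);
			return (0);
		}
		if (error == ERESTART) {
			/* The open txg was closing: wait, drop, retry. */
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			continue;
		}
		dmu_tx_abort(tx);
		return (error);		/* e.g. ENOSPC */
	}
}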
@@ -10,7 +10,8 @@ AM_CPPFLAGS = \
-I$(top_srcdir)/include \
-I$(top_srcdir)/module/icp/include \
-I$(top_srcdir)/lib/libspl/include \
-I$(top_srcdir)/lib/libspl/include/os/@ac_system_l@
-I$(top_srcdir)/lib/libspl/include/os/@ac_system_l@ \
-I$(top_srcdir)/lib/libzpool/include

AM_LIBTOOLFLAGS = --silent

@@ -1,4 +1,3 @@
# SPDX-License-Identifier: LGPL-2.1-or-later
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_code_coverage.html
# ===========================================================================
@@ -5,7 +5,7 @@ dnl # solution to handling automounts. Prior to this cifs/nfs clients
dnl # which required automount support would abuse the follow_link()
dnl # operation on directories for this purpose.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_D_AUTOMOUNT], [
AC_DEFUN([ZFS_AC_KERNEL_SRC_AUTOMOUNT], [
ZFS_LINUX_TEST_SRC([dentry_operations_d_automount], [
#include <linux/dcache.h>
static struct vfsmount *d_automount(struct path *p) { return NULL; }

@@ -15,7 +15,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_D_AUTOMOUNT], [
])
])

AC_DEFUN([ZFS_AC_KERNEL_D_AUTOMOUNT], [
AC_DEFUN([ZFS_AC_KERNEL_AUTOMOUNT], [
AC_MSG_CHECKING([whether dops->d_automount() exists])
ZFS_LINUX_TEST_RESULT([dentry_operations_d_automount], [
AC_MSG_RESULT(yes)

@@ -23,40 +23,3 @@ AC_DEFUN([ZFS_AC_KERNEL_D_AUTOMOUNT], [
ZFS_LINUX_TEST_ERROR([dops->d_automount()])
])
])

dnl #
dnl # 6.14 API change
dnl # dops->d_revalidate now has four args.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_D_REVALIDATE_4ARGS], [
ZFS_LINUX_TEST_SRC([dentry_operations_d_revalidate_4args], [
#include <linux/dcache.h>
static int d_revalidate(struct inode *dir,
const struct qstr *name, struct dentry *dentry,
unsigned int fl) { return 0; }
struct dentry_operations dops __attribute__ ((unused)) = {
.d_revalidate = d_revalidate,
};
])
])

AC_DEFUN([ZFS_AC_KERNEL_D_REVALIDATE_4ARGS], [
AC_MSG_CHECKING([whether dops->d_revalidate() takes 4 args])
ZFS_LINUX_TEST_RESULT([dentry_operations_d_revalidate_4args], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_D_REVALIDATE_4ARGS, 1,
[dops->d_revalidate() takes 4 args])
],[
AC_MSG_RESULT(no)
])
])

AC_DEFUN([ZFS_AC_KERNEL_SRC_AUTOMOUNT], [
ZFS_AC_KERNEL_SRC_D_AUTOMOUNT
ZFS_AC_KERNEL_SRC_D_REVALIDATE_4ARGS
])

AC_DEFUN([ZFS_AC_KERNEL_AUTOMOUNT], [
ZFS_AC_KERNEL_D_AUTOMOUNT
ZFS_AC_KERNEL_D_REVALIDATE_4ARGS
])
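The ZFS_AC_KERNEL_D_REVALIDATE_4ARGS check in the hunk above defines HAVE_D_REVALIDATE_4ARGS when dops->d_revalidate() takes four arguments (the Linux 6.14 signature also shown in the test source). As a hedged sketch of how such a configure result is typically consumed, the C fragment below switches between the two prototypes; the function body and the zpl_demo_dops name are illustrative and not taken from the source.

#include <linux/dcache.h>

#if defined(HAVE_D_REVALIDATE_4ARGS)
static int
zpl_demo_d_revalidate(struct inode *dir, const struct qstr *name,
    struct dentry *dentry, unsigned int flags)
#else
static int
zpl_demo_d_revalidate(struct dentry *dentry, unsigned int flags)
#endif
{
	/* Illustrative only: always report the dentry as still valid. */
	return (1);
}

static const struct dentry_operations zpl_demo_dops = {
	.d_revalidate	= zpl_demo_d_revalidate,
};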
@@ -2,22 +2,6 @@ dnl #
dnl # Supported mkdir() interfaces checked newest to oldest.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_MKDIR], [
dnl #
dnl # 6.15 API change
dnl # mkdir() returns struct dentry *
dnl #
ZFS_LINUX_TEST_SRC([mkdir_return_dentry], [
#include <linux/fs.h>

static struct dentry *mkdir(struct mnt_idmap *idmap,
struct inode *inode, struct dentry *dentry,
umode_t umode) { return dentry; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.mkdir = mkdir,
};
],[])

dnl #
dnl # 6.3 API change
dnl # mkdir() takes struct mnt_idmap * as the first arg

@@ -75,40 +59,29 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_MKDIR], [

AC_DEFUN([ZFS_AC_KERNEL_MKDIR], [
dnl #
dnl # 6.15 API change
dnl # mkdir() returns struct dentry *
dnl # 6.3 API change
dnl # mkdir() takes struct mnt_idmap * as the first arg
dnl #
AC_MSG_CHECKING([whether iops->mkdir() returns struct dentry*])
ZFS_LINUX_TEST_RESULT([mkdir_return_dentry], [
AC_MSG_CHECKING([whether iops->mkdir() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([mkdir_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_MKDIR_DENTRY, 1,
[iops->mkdir() returns struct dentry*])
AC_DEFINE(HAVE_IOPS_MKDIR_IDMAP, 1,
[iops->mkdir() takes struct mnt_idmap*])
],[
AC_MSG_RESULT(no)

dnl #
dnl # 6.3 API change
dnl # mkdir() takes struct mnt_idmap * as the first arg
dnl # 5.12 API change
dnl # The struct user_namespace arg was added as the first argument to
dnl # mkdir() of the iops structure.
dnl #
AC_MSG_CHECKING([whether iops->mkdir() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([mkdir_mnt_idmap], [
AC_MSG_CHECKING([whether iops->mkdir() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([mkdir_user_namespace], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_MKDIR_IDMAP, 1,
[iops->mkdir() takes struct mnt_idmap*])
AC_DEFINE(HAVE_IOPS_MKDIR_USERNS, 1,
[iops->mkdir() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)

dnl #
dnl # 5.12 API change
dnl # The struct user_namespace arg was added as the first argument to
dnl # mkdir() of the iops structure.
dnl #
AC_MSG_CHECKING([whether iops->mkdir() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([mkdir_user_namespace], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_MKDIR_USERNS, 1,
[iops->mkdir() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
])
])
])
])
@@ -11,12 +11,10 @@ AC_DEFUN([ZFS_AC_KERNEL_OBJTOOL_HEADER], [
#include <linux/objtool.h>
],[
],[
objtool_header=$LINUX/include/linux/objtool.h
AC_DEFINE(HAVE_KERNEL_OBJTOOL_HEADER, 1,
[kernel has linux/objtool.h])
AC_MSG_RESULT(linux/objtool.h)
],[
objtool_header=$LINUX/include/linux/frame.h
AC_MSG_RESULT(linux/frame.h)
])
])

@@ -49,15 +47,6 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_OBJTOOL], [
#error "STACK_FRAME_NON_STANDARD is not defined."
#endif
])

dnl # 6.15 made CONFIG_OBJTOOL_WERROR=y the default. We need to handle
dnl # this or our build will fail.
ZFS_LINUX_TEST_SRC([config_objtool_werror], [
#if !defined(CONFIG_OBJTOOL_WERROR)
#error "CONFIG_OBJTOOL_WERROR is not defined."
#endif
])

])

AC_DEFUN([ZFS_AC_KERNEL_OBJTOOL], [

@@ -73,31 +62,6 @@ AC_DEFUN([ZFS_AC_KERNEL_OBJTOOL], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_STACK_FRAME_NON_STANDARD, 1,
[STACK_FRAME_NON_STANDARD is defined])

dnl # Needed for kernels missing the asm macro. We grep
dnl # for it in the header file since there is currently
dnl # no test to check the result of assembling a file.
AC_MSG_CHECKING(
[whether STACK_FRAME_NON_STANDARD asm macro is defined])
dnl # Escape square brackets.
sp='@<:@@<:@:space:@:>@@:>@'
dotmacro='@<:@.@:>@macro'
regexp="^$sp*$dotmacro$sp+STACK_FRAME_NON_STANDARD$sp"
AS_IF([$EGREP -s -q "$regexp" $objtool_header],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_STACK_FRAME_NON_STANDARD_ASM, 1,
[STACK_FRAME_NON_STANDARD asm macro is defined])
],[
AC_MSG_RESULT(no)
])
],[
AC_MSG_RESULT(no)
])

AC_MSG_CHECKING([whether CONFIG_OBJTOOL_WERROR is defined])
ZFS_LINUX_TEST_RESULT([config_objtool_werror],[
AC_MSG_RESULT(yes)
CONFIG_OBJTOOL_WERROR_DEFINED=yes
],[
AC_MSG_RESULT(no)
])
@@ -1,23 +0,0 @@
dnl #
dnl # Linux 6.16 removed readahead_page
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_PAGEMAP_READAHEAD_PAGE], [
ZFS_LINUX_TEST_SRC([pagemap_has_readahead_page], [
#include <linux/pagemap.h>
], [
struct page *p __attribute__ ((unused)) = NULL;
struct readahead_control *ractl __attribute__ ((unused)) = NULL;
p = readahead_page(ractl);
])
])

AC_DEFUN([ZFS_AC_KERNEL_PAGEMAP_READAHEAD_PAGE], [
AC_MSG_CHECKING([whether readahead_page() exists])
ZFS_LINUX_TEST_RESULT([pagemap_has_readahead_page], [
AC_MSG_RESULT([yes])
AC_DEFINE(HAVE_PAGEMAP_READAHEAD_PAGE, 1,
[readahead_page() exists])
],[
AC_MSG_RESULT([no])
])
])