mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2025-12-01 09:32:08 +03:00)
Add a new 'zfs-qemu-packages' GH workflow for manually building RPMs
and test-installing ZFS RPMs from a yum repo. The workflow has a
dropdown menu in the GitHub runners tab with two options:

    Build RPMs - Build release RPMs and tarballs and put them into an
                 artifact ZIP file. The directory structure used in
                 the ZIP file mirrors the ZFS yum repo.

    Test repo -  Test install the ZFS RPMs from the ZFS repo. On
                 AlmaLinux, this will do a DKMS and KMOD test install
                 from both the regular and testing repos. On Fedora,
                 it will do a DKMS install from the regular repo. All
                 test install results are displayed in the GitHub
                 runner Summary page. Note that the workflow provides
                 an optional text box where you can specify the full
                 URL of an alternate repo. If left blank, it installs
                 from the default repo configured by the zfs-release
                 RPM.

Most developers will never need to use this workflow. It is intended
for the ZFS admins to build and test releases.

This commit also modularizes many of the runner scripts so they can
be used by both the zfs-qemu and zfs-qemu-packages workflows.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Tino Reichardt <milky-zfs@mcmilk.de>
Signed-off-by: Tony Hutter <hutter2@llnl.gov>
Closes #17005
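For reference, a workflow like this can also be dispatched from the
command line with the GitHub CLI. The input name below is illustrative
only; the actual input IDs are defined in zfs-qemu-packages.yml:

    # Hypothetical invocation -- check zfs-qemu-packages.yml for the
    # real workflow input names before using.
    gh workflow run zfs-qemu-packages.yml -f test_type='Build RPMs'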
125 lines · 2.9 KiB · Bash · Executable File
#!/usr/bin/env bash

######################################################################
# 5) start test machines and load openzfs module
######################################################################

set -eu

# read our defined variables
source /var/tmp/env.txt
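# (env.txt is written by the earlier qemu-* steps and provides the
#  variables used below, e.g. $OS, $OSv, $NIC, $ENV and $RESPATH)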

# wait for poweroff to succeed
PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
sudo virsh undefine openzfs
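# ('tail --pid' simply blocks until the build VM's QEMU process has
#  exited; undefining the domain frees the 'openzfs' name while its
#  zvol stays behind for cloning below)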

# default values per test vm:
VMs=2
CPU=2

# cpu pinning
CPUSET=("0,1" "2,3")
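# (one dedicated pair of host cores per VM, so the test VMs don't
#  compete with each other for CPU time)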
case "$OS" in
|
|
freebsd*)
|
|
# FreeBSD can't be optimized via ksmtuned
|
|
RAM=6
|
|
;;
|
|
*)
|
|
# Linux can be optimized via ksmtuned
|
|
RAM=8
|
|
;;
|
|
esac
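# (KSM lets the host deduplicate identical guest memory pages, which is
#  why the Linux guests can be sized larger than the FreeBSD ones)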

# this can be different for each distro
echo "VMs=$VMs" >> $ENV

# create snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now
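# (each test VM below gets a cheap copy-on-write clone of this snapshot
#  instead of a full copy of the disk image)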

# set up the testing VMs
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
for i in $(seq 1 $VMs); do

  echo "Creating disk for vm$i..."
  DISK="/dev/zvol/zpool/vm$i"
  FORMAT="raw"
  sudo zfs clone zpool/openzfs@now zpool/vm$i
  sudo zfs create -ps -b 64k -V 80g zpool/vm$i-2
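  # (-p creates parent datasets as needed, -s makes the 80G volume
  #  sparse, -b 64k sets the volblocksize; this second zvol serves as
  #  the VM's extra test disk)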

  cat <<EOF > /tmp/user-data
#cloud-config

fqdn: vm$i

users:
- name: root
  shell: $BASH
- name: zfs
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: $BASH
  ssh_authorized_keys:
    - $PUBKEY

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF
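  # (cloud-init picks up this user-data on first boot: it sets the
  #  hostname, creates a passwordless-sudo 'zfs' user with our SSH key,
  #  and grows the root partition to fill the cloned disk)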

  sudo virsh net-update default add ip-dhcp-host \
    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config

  sudo virt-install \
    --os-variant $OSv \
    --name "vm$i" \
    --cpu host-passthrough \
    --virt-type=kvm --hvm \
    --vcpus=$CPU,sockets=1 \
    --cpuset=${CPUSET[$((i-1))]} \
    --memory $((1024*RAM)) \
    --memballoon model=virtio \
    --graphics none \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole >/dev/null
done
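# (in the virt-install above, --import boots the existing disk image
#  instead of running an installer, and driver.discard=unmap lets guest
#  TRIM requests punch holes in the sparse zvols)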

# check the memory state from time to time
cat <<EOF > cronjob.sh
# $OS
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "*******************************************************"
date
uptime
free -m
df -h /mnt/tests
zfs list
EOF
sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt
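# (root's crontab now appends host uptime, memory, disk and pool usage
#  to /var/tmp/stats.txt every five minutes while the tests run)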

# check if the machines are okay
echo "Waiting for VMs to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
for i in $(seq 1 $VMs); do
  .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
done
echo "All $VMs VMs are up now."

# Save each VM's serial output (ttyS0) to $RESPATH/vm*/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pts/N entry
# - use 'virsh ttyconsole' to look up the /dev/pts/N entry
for i in $(seq 1 $VMs); do
  mkdir -p $RESPATH/vm$i
  read "pty" <<< $(sudo virsh ttyconsole vm$i)
  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
done
echo "Console logging for ${VMs}x $OS started."