#!/usr/bin/env bash
# SPDX-License-Identifier: LGPL-2.1-or-later
# vi: ts=4 sw=4 tw=0 et:

set -eux
set -o pipefail

# Check if all symlinks under /dev/disk/ are valid
# shellcheck disable=SC2120
helper_check_device_symlinks() {(
    set +x

    local dev link path paths target

    [[ $# -gt 0 ]] && paths=("$@") || paths=("/dev/disk" "/dev/mapper")

    # Check if all given paths are valid
    for path in "${paths[@]}"; do
        if ! test -e "$path"; then
            echo >&2 "Path '$path' doesn't exist"
            return 1
        fi
    done

    while read -r link; do
        target="$(readlink -f "$link")"
        echo "$link -> $target"
        # Both checks should do virtually the same thing, but check both to be
        # on the safe side
        if [[ ! -e "$link" || ! -e "$target" ]]; then
            echo >&2 "ERROR: symlink '$link' points to '$target' which doesn't exist"
            return 1
        fi

        # Check if the symlink points to the correct device in /dev
        dev="/dev/$(udevadm info -q name "$link")"
        if [[ "$target" != "$dev" ]]; then
            echo >&2 "ERROR: symlink '$link' points to '$target' but '$dev' was expected"
            return 1
        fi
    done < <(find "${paths[@]}" -type l)
)}

testcase_megasas2_basic() {
    lsblk -S
    [[ "$(lsblk --scsi --noheadings | wc -l)" -ge 128 ]]
}

testcase_nvme_basic() {
    lsblk --noheadings | grep "^nvme"
    [[ "$(lsblk --noheadings | grep -c "^nvme")" -ge 28 ]]
}

testcase_virtio_scsi_identically_named_partitions() {
    lsblk --noheadings -a -o NAME,PARTLABEL
    [[ "$(lsblk --noheadings -a -o NAME,PARTLABEL | grep -c "Hello world")" -eq $((16 * 8)) ]]
}

testcase_multipath_basic_failover() {
    local dmpath i path wwid

    # Configure multipath
    cat >/etc/multipath.conf <<\EOF
defaults {
    # Use /dev/mapper/$WWN paths instead of /dev/mapper/mpathX
    user_friendly_names no
    find_multipaths yes
    enable_foreign "^$"
}

blacklist_exceptions {
    property "(SCSI_IDENT_|ID_WWN)"
}

blacklist {
}
EOF
    modprobe -v dm_multipath
    systemctl start multipathd.service
    systemctl status multipathd.service
    multipath -ll
    udevadm settle
    ls -l /dev/disk/by-id/

    for i in {0..63}; do
        wwid="deaddeadbeef$(printf "%.4d" "$i")"
        path="/dev/disk/by-id/wwn-0x$wwid"
        dmpath="$(readlink -f "$path")"

        lsblk "$path"
        multipath -C "$dmpath"
        # We should have 4 active paths for each multipath device
        [[ "$(multipath -l "$path" | grep -c running)" -eq 4 ]]
    done

    # Test failover (with the first multipath device that has a partitioned disk)
    echo "${FUNCNAME[0]}: test failover"
    local device expected link mpoint part
    local -a devices
    mpoint="$(mktemp -d /mnt/mpathXXX)"
    wwid="deaddeadbeef0000"
    path="/dev/disk/by-id/wwn-0x$wwid"

    # All of the following symlinks should exist and be valid
    local -a part_links=(
        "/dev/disk/by-id/wwn-0x$wwid-part2"
        "/dev/disk/by-partlabel/failover_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-label/failover_vol"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
    )
    udevadm wait --settle --timeout=30 "${part_links[@]}"

    # Choose a random symlink to the failover data partition each time, for
    # better coverage
    part="${part_links[$RANDOM % ${#part_links[@]}]}"

    # Get all devices attached to a specific multipath device (in H:C:T:L format)
    # and sort them in a random order, so we cut off different paths each time
    mapfile -t devices < <(multipath -l "$path" | grep -Eo '[0-9]+:[0-9]+:[0-9]+:[0-9]+' | sort -R)
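    # Note (illustrative): the H:C:T:L addresses extracted above are SCSI
    # host:channel:target:lun tuples, e.g. "2:0:0:1", and they match the
    # directory names under /sys/class/scsi_device/ that the failover loop
    # below uses to offline individual paths.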
    if [[ "${#devices[@]}" -ne 4 ]]; then
        echo "Expected 4 devices attached to WWID=$wwid, got ${#devices[@]} instead"
        return 1
    fi
    # Drop the last path from the array, since we want to leave at least one path active
    unset "devices[3]"
    # Mount the first multipath partition, write some data we can check later,
    # and then disconnect the remaining paths one by one while checking if we
    # can still read/write from the mount
    mount -t ext4 "$part" "$mpoint"
    expected=0
    echo -n "$expected" >"$mpoint/test"
    # Sanity check we actually wrote what we wanted
    [[ "$(<"$mpoint/test")" == "$expected" ]]

    for device in "${devices[@]}"; do
        echo offline >"/sys/class/scsi_device/$device/device/state"
        [[ "$(<"$mpoint/test")" == "$expected" ]]
        expected="$((expected + 1))"
        echo -n "$expected" >"$mpoint/test"

        # Make sure all symlinks are still valid
        udevadm wait --settle --timeout=30 "${part_links[@]}"
    done

    multipath -l "$path"
    # Three paths should now be marked as 'offline' and one as 'running'
    [[ "$(multipath -l "$path" | grep -c offline)" -eq 3 ]]
    [[ "$(multipath -l "$path" | grep -c running)" -eq 1 ]]

    umount "$mpoint"
    rm -fr "$mpoint"
}

testcase_simultaneous_events() {
    local blockdev part partscript

    blockdev="$(readlink -f /dev/disk/by-id/scsi-*_deadbeeftest)"
    partscript="$(mktemp)"

    if [[ ! -b "$blockdev" ]]; then
        echo "ERROR: failed to find the test SCSI block device"
        return 1
    fi

    cat >"$partscript" <<EOF
$(printf 'name="test%d", size=2M\n' {1..50})
EOF

    # Initial partition table
    udevadm lock --device="$blockdev" sfdisk -q -X gpt "$blockdev" <"$partscript"

    # Delete the partitions, immediately recreate them, wait for udev to settle
    # down, and then check if we have any dangling symlinks in /dev/disk/. Rinse
    # and repeat.
    #
    # On unpatched udev versions the delete-recreate cycle may trigger a race
    # leading to dead symlinks in /dev/disk/
    for i in {1..100}; do
        udevadm lock --device="$blockdev" sfdisk -q --delete "$blockdev"
        udevadm lock --device="$blockdev" sfdisk -q -X gpt "$blockdev" <"$partscript"

        if ((i % 10 == 0)); then
            udevadm wait --settle --timeout=30 "$blockdev"
            helper_check_device_symlinks
        fi
    done

    rm -f "$partscript"
}

testcase_lvm_basic() {
    local i part
    local vgroup="MyTestGroup$RANDOM"
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeeflvm{0..3}
    )

    # Make sure all the necessary soon-to-be-LVM devices exist
    ls -l "${devices[@]}"

    # Add all test devices into a volume group, create two logical volumes,
    # and check if necessary symlinks exist (and are valid)
    lvm pvcreate -y "${devices[@]}"
    lvm pvs
    lvm vgcreate "$vgroup" -y "${devices[@]}"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    lvm lvs
    udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm wait --settle --timeout=30 "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

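    # At this point the VG should expose two LVs whose /dev/<vgroup>/<lv> nodes
    # are udev-managed symlinks resolving to the underlying dm-N devices,
    # roughly like this (illustrative):
    #
    #   /dev/MyTestGroup12345/mypart1 -> ../dm-4   (4M, ext4, label mylvpart1)
    #   /dev/MyTestGroup12345/mypart2 -> ../dm-5   (8M)
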
    # Disable the VG and check symlinks...
    lvm vgchange -an "$vgroup"
    udevadm wait --settle --timeout=30 --removed "/dev/$vgroup" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk"

    # Re-enable the VG and check the symlinks again to see whether all LVs are properly activated
    lvm vgchange -ay "$vgroup"
    udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Same as above, but now with more "stress"
    for i in {1..50}; do
        lvm vgchange -an "$vgroup"
        lvm vgchange -ay "$vgroup"

        if ((i % 5 == 0)); then
            udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
        fi
    done

    # Remove the first LV
    lvm lvremove -y "$vgroup/mypart1"
    udevadm wait --settle --timeout=30 --removed "/dev/$vgroup/mypart1"
    udevadm wait --timeout=0 "/dev/$vgroup/mypart2"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Create & remove LVs in a loop, i.e. with more "stress"
    for i in {1..16}; do
        # 1) Create 16 logical volumes
        for part in {0..15}; do
            lvm lvcreate -y -L 4M "$vgroup" -n "looppart$part"
        done

        # 2) Immediately remove them
        lvm lvremove -y "$vgroup"/looppart{0..15}

        # 3) On every 4th iteration settle udev and check if all partitions are
        #    indeed gone, and if all symlinks are still valid
        if ((i % 4 == 0)); then
            for part in {0..15}; do
                udevadm wait --settle --timeout=30 --removed "/dev/$vgroup/looppart$part"
            done
            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
        fi
    done
}

testcase_btrfs_basic() {
    local dev_stub i label mpoint uuid
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefbtrfs{0..3}
    )

    ls -l "${devices[@]}"

    echo "Single device: default settings"
    uuid="deadbeef-dead-dead-beef-000000000000"
    label="btrfs_root"
    udevadm lock --device="${devices[0]}" mkfs.btrfs -L "$label" -U "$uuid" "${devices[0]}"
    udevadm wait --settle --timeout=30 "${devices[0]}" "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks

    echo "Multiple devices: using partitions, data: single, metadata: raid1"
    uuid="deadbeef-dead-dead-beef-000000000001"
    label="btrfs_mpart"
    udevadm lock --device="${devices[0]}" sfdisk --wipe=always "${devices[0]}" <<EOF
label: gpt

name="diskpart1", size=85M
name="diskpart2", size=85M
name="diskpart3", size=85M
name="diskpart4", size=85M
EOF
    udevadm wait --settle --timeout=30 /dev/disk/by-partlabel/diskpart{1..4}
    udevadm lock --device="${devices[0]}" mkfs.btrfs -d single -m raid1 -L "$label" -U "$uuid" /dev/disk/by-partlabel/diskpart{1..4}
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks
    wipefs -a -f "${devices[0]}"
    udevadm wait --settle --timeout=30 --removed /dev/disk/by-partlabel/diskpart{1..4}

    echo "Multiple devices: using disks, data: raid10, metadata: raid10, mixed mode"
    uuid="deadbeef-dead-dead-beef-000000000002"
    label="btrfs_mdisk"
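    # Note: 'udevadm lock' accepts multiple --device= switches; it takes the
    # advisory BSD file lock on every listed block device before running the
    # wrapped command, so udevd shouldn't process events for any of them while
    # mkfs.btrfs below is still writing to the disks.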
    udevadm lock \
        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs0 \
        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs1 \
        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs2 \
        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs3 \
        mkfs.btrfs -M -d raid10 -m raid10 -L "$label" -U "$uuid" "${devices[@]}"
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks

    echo "Multiple devices: using LUKS encrypted disks, data: raid1, metadata: raid1, mixed mode"
    uuid="deadbeef-dead-dead-beef-000000000003"
    label="btrfs_mencdisk"
    mpoint="/btrfs_enc$RANDOM"
    mkdir "$mpoint"
    # Create a key-file
    dd if=/dev/urandom of=/etc/btrfs_keyfile bs=64 count=1 iflag=fullblock
    chmod 0600 /etc/btrfs_keyfile
    # Encrypt each device and add it to /etc/crypttab, so it can be mounted
    # automagically later
    : >/etc/crypttab
    for ((i = 0; i < ${#devices[@]}; i++)); do
        # Intentionally use weaker cipher-related settings, since we don't care
        # about security here as it's a throwaway LUKS partition
        cryptsetup luksFormat -q \
            --use-urandom --pbkdf pbkdf2 --pbkdf-force-iterations 1000 \
            --uuid "deadbeef-dead-dead-beef-11111111111$i" --label "encdisk$i" "${devices[$i]}" /etc/btrfs_keyfile
        udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/deadbeef-dead-dead-beef-11111111111$i" "/dev/disk/by-label/encdisk$i"
        # Add the device into /etc/crypttab, reload systemd, and then activate
        # the device so we can create a filesystem on it later
        echo "encbtrfs$i UUID=deadbeef-dead-dead-beef-11111111111$i /etc/btrfs_keyfile luks,noearly" >>/etc/crypttab
        systemctl daemon-reload
        systemctl start "systemd-cryptsetup@encbtrfs$i"
    done
    helper_check_device_symlinks
    # Check if we have all necessary DM devices
    ls -l /dev/mapper/encbtrfs{0..3}
    # Create a multi-device btrfs filesystem on the LUKS devices
    udevadm lock \
        --device=/dev/mapper/encbtrfs0 \
        --device=/dev/mapper/encbtrfs1 \
        --device=/dev/mapper/encbtrfs2 \
        --device=/dev/mapper/encbtrfs3 \
        mkfs.btrfs -M -d raid1 -m raid1 -L "$label" -U "$uuid" /dev/mapper/encbtrfs{0..3}
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks
    # Mount it and write some data to it we can compare later
    mount -t btrfs /dev/mapper/encbtrfs0 "$mpoint"
    echo "hello there" >"$mpoint/test"
    # "Deconstruct" the btrfs device and check if we're in a sane state (symlink-wise)
    umount "$mpoint"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    udevadm wait --settle --timeout=30 --removed "/dev/disk/by-uuid/$uuid"
    helper_check_device_symlinks
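    # For reference, /etc/crypttab still contains one line per LUKS device from
    # the loop above, e.g. (illustrative):
    #
    #   encbtrfs0 UUID=deadbeef-dead-dead-beef-111111111110 /etc/btrfs_keyfile luks,noearly
    #
    # These entries are what the cryptsetup.target restart below relies on to
    # unlock the devices again automatically.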
    # Add the mount point to /etc/fstab and check if the device can be put together
    # automagically. The source device is the DM name of the first LUKS device
    # (from /etc/crypttab). We have to specify all LUKS devices manually, as
    # registering the necessary devices is usually initrd's job (via btrfs device scan)
    dev_stub="/dev/mapper/encbtrfs"
    echo "/dev/mapper/encbtrfs0 $mpoint btrfs device=${dev_stub}0,device=${dev_stub}1,device=${dev_stub}2,device=${dev_stub}3 0 2" >>/etc/fstab
    # Tell systemd about the new mount
    systemctl daemon-reload
    # Restart cryptsetup.target to trigger autounlock of partitions in /etc/crypttab
    systemctl restart cryptsetup.target
    # Start the corresponding mount unit and check if the btrfs device was reconstructed
    # correctly
    systemctl start "${mpoint##*/}.mount"
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks
    grep "hello there" "$mpoint/test"
    # Cleanup
    systemctl stop "${mpoint##*/}.mount"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    sed -i "/${mpoint##*/}/d" /etc/fstab
    : >/etc/crypttab
    rm -fr "$mpoint"
    systemctl daemon-reload
    udevadm settle
}

testcase_iscsi_lvm() {
    local dev i label link lun_id mpoint target_name uuid
    local target_ip="127.0.0.1"
    local target_port="3260"
    local vgroup="iscsi_lvm$RANDOM"
    local expected_symlinks=()
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefiscsi{0..3}
    )

    ls -l "${devices[@]}"

    # Start the target daemon
    systemctl start tgtd
    systemctl status tgtd

    echo "iSCSI LUNs backed by devices"
    # See RFC3721 and RFC7143
    target_name="iqn.2021-09.com.example:iscsi.test"
    # Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
    # backed by a device
    tgtadm --lld iscsi --op new --mode target --tid=1 --targetname "$target_name"
    for ((i = 0; i < ${#devices[@]}; i++)); do
        # lun-0 is reserved by iSCSI
        lun_id="$((i + 1))"
        tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun "$lun_id" -b "${devices[$i]}"
        tgtadm --lld iscsi --op update --mode logicalunit --tid 1 --lun "$lun_id"
        expected_symlinks+=(
            "/dev/disk/by-path/ip-$target_ip:$target_port-iscsi-$target_name-lun-$lun_id"
        )
    done
    tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
    # Configure the iSCSI initiator
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    helper_check_device_symlinks
    # Cleanup
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=1

    echo "iSCSI LUNs backed by files + LVM"
    # Note: we use files here to "trick" LVM into thinking the disks are indeed
    # on a different host, so it doesn't automagically detect another path to
    # the backing device once we disconnect the iSCSI devices
    target_name="iqn.2021-09.com.example:iscsi.lvm.test"
    mpoint="$(mktemp -d /iscsi_storeXXX)"
    expected_symlinks=()
    # Use the first device as it's configured with a larger capacity
    mkfs.ext4 -L iscsi_store "${devices[0]}"
    udevadm wait --settle --timeout=30 "${devices[0]}"
    mount "${devices[0]}" "$mpoint"
    for i in {1..4}; do
        dd if=/dev/zero of="$mpoint/lun$i.img" bs=1M count=32
    done
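    # Each file-backed LUN exported below should show up on the initiator side
    # with a by-path symlink such as (illustrative):
    #
    #   /dev/disk/by-path/ip-127.0.0.1:3260-iscsi-iqn.2021-09.com.example:iscsi.lvm.test-lun-1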
    # Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
    # backed by a file
    tgtadm --lld iscsi --op new --mode target --tid=2 --targetname "$target_name"
    # lun-0 is reserved by iSCSI
    for i in {1..4}; do
        tgtadm --lld iscsi --op new --mode logicalunit --tid 2 --lun "$i" -b "$mpoint/lun$i.img"
        tgtadm --lld iscsi --op update --mode logicalunit --tid 2 --lun "$i"
        expected_symlinks+=(
            "/dev/disk/by-path/ip-$target_ip:$target_port-iscsi-$target_name-lun-$i"
        )
    done
    tgtadm --lld iscsi --op bind --mode target --tid 2 -I ALL
    # Configure the iSCSI initiator
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    helper_check_device_symlinks
    # Add all iSCSI devices into an LVM volume group, create two logical volumes,
    # and check if necessary symlinks exist (and are valid)
    lvm pvcreate -y "${expected_symlinks[@]}"
    lvm pvs
    lvm vgcreate "$vgroup" -y "${expected_symlinks[@]}"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    lvm lvs
    udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm wait --settle --timeout=30 "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    # Disconnect the iSCSI devices and check all the symlinks
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    # "Reset" the DM state, since we yanked the backing storage from under the LVM,
    # so the currently active VGs/LVs are invalid
    dmsetup remove_all --deferred
    # The LVM and iSCSI related symlinks should be gone
    udevadm wait --settle --timeout=30 --removed "/dev/$vgroup" "/dev/disk/by-label/mylvpart1" "${expected_symlinks[@]}"
    helper_check_device_symlinks "/dev/disk"
    # Reconnect the iSCSI devices and check if everything gets detected correctly
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}" "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    # Cleanup
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=2
    umount "$mpoint"
    rm -rf "$mpoint"
}

testcase_long_sysfs_path() {
    local cursor link logfile mpoint
    local expected_symlinks=(
        "/dev/disk/by-label/data_vol"
        "/dev/disk/by-label/swap_vol"
        "/dev/disk/by-partlabel/test_swap"
        "/dev/disk/by-partlabel/test_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-222222222222"
    )

    # Create a cursor file to skip messages generated by udevd in initrd, as it
    # might not be the same up-to-date version as we currently run (hence generating
    # messages we check for later and making the test fail)
    cursor="$(mktemp)"
    journalctl --cursor-file="${cursor:?}" -n0 -q

    # Make sure the test device is connected and show its "wonderful" path
    stat /sys/block/vda
    readlink -f /sys/block/vda/dev

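    # The sysfs path of this disk is expected to be unusually deep (hence the
    # testcase name), too long to be encoded verbatim into a .device unit name;
    # the journal checks below verify that systemd falls back to hashed unit
    # names instead of failing.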
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"

    # Try to mount the data partition manually (using its label)
    mpoint="$(mktemp -d /logsysfsXXX)"
    mount LABEL=data_vol "$mpoint"
    touch "$mpoint/test"
    umount "$mpoint"
    # Do the same, but with UUID and using fstab
    echo "UUID=deadbeef-dead-dead-beef-222222222222 $mpoint ext4 defaults 0 0" >>/etc/fstab
    systemctl daemon-reload
    mount "$mpoint"
    systemctl status "$mpoint"
    test -e "$mpoint/test"
    umount "$mpoint"

    # Test out the swap partition
    swapon -v -L swap_vol
    swapoff -v -L swap_vol

    udevadm settle

    logfile="$(mktemp)"
    # Check state of affairs after https://github.com/systemd/systemd/pull/22759
    # Note: can't use `--cursor-file` here, since we don't want to update the cursor
    # after using it
    [[ "$(journalctl --after-cursor="$(<"$cursor")" -q --no-pager -o short-monotonic -p info --grep "Device path.*vda.?' too long to fit into unit name" | wc -l)" -eq 0 ]]
    [[ "$(journalctl --after-cursor="$(<"$cursor")" -q --no-pager -o short-monotonic --grep "Unit name .*vda.?\.device\" too long, falling back to hashed unit name" | wc -l)" -gt 0 ]]
    # Check if the respective "hashed" units exist and are active (plugged)
    systemctl status --no-pager "$(readlink -f /sys/block/vda/vda1)"
    systemctl status --no-pager "$(readlink -f /sys/block/vda/vda2)"
    # Make sure we don't unnecessarily spam the log
    { journalctl -b -q --no-pager -o short-monotonic -p info --grep "/sys/devices/.+/vda[0-9]?" _PID=1 + UNIT=systemd-udevd.service || :;} | tee "$logfile"
    [[ "$(wc -l <"$logfile")" -lt 10 ]]

    : >/etc/fstab
    rm -fr "${cursor:?}" "${logfile:?}" "${mpoint:?}"
}

testcase_mdadm_basic() {
    local i part_name raid_name raid_dev uuid
    local expected_symlinks=()
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefmdadm{0..4}
    )

    ls -l "${devices[@]}"

    echo "Mirror raid (RAID 1)"
    raid_name="mdmirror"
    raid_dev="/dev/md/$raid_name"
    part_name="${raid_name}_part"
    uuid="aaaaaaaa:bbbbbbbb:cccccccc:00000001"
    expected_symlinks=(
        "$raid_dev"
        "/dev/disk/by-id/md-name-H:$raid_name"
        "/dev/disk/by-id/md-uuid-$uuid"
        "/dev/disk/by-label/$part_name" # ext4 partition
    )
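    # Note: the md-uuid-* and md-name-* symlinks under /dev/disk/by-id/ should be
    # created by the udev md rules from the array metadata; the "H:" prefix in
    # md-name-* is the array's homehost, which in this test environment is
    # expected to be the machine's hostname "H".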
    # Create a simple RAID 1 with an ext4 filesystem
    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/ata-foobar_deadbeefmdadm{0..1} -v -f --level=1 --raid-devices=2
    udevadm wait --settle --timeout=30 "$raid_dev"
    mkfs.ext4 -L "$part_name" "$raid_dev"
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    for i in {0..9}; do
        echo "Disassemble - reassemble loop, iteration #$i"
        mdadm -v --stop "$raid_dev"
        udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
        mdadm --assemble "$raid_dev" --name "$raid_name" -v
        udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    done
    helper_check_device_symlinks
    # Cleanup
    mdadm -v --stop "$raid_dev"
    udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"

    echo "Parity raid (RAID 5)"
    raid_name="mdparity"
    raid_dev="/dev/md/$raid_name"
    part_name="${raid_name}_part"
    uuid="aaaaaaaa:bbbbbbbb:cccccccc:00000101"
    expected_symlinks=(
        "$raid_dev"
        "/dev/disk/by-id/md-name-H:$raid_name"
        "/dev/disk/by-id/md-uuid-$uuid"
        "/dev/disk/by-label/$part_name" # ext4 partition
    )
    # Create a simple RAID 5 with an ext4 filesystem
    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/ata-foobar_deadbeefmdadm{0..2} -v -f --level=5 --raid-devices=3
    udevadm wait --settle --timeout=30 "$raid_dev"
    mkfs.ext4 -L "$part_name" "$raid_dev"
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    for i in {0..9}; do
        echo "Disassemble - reassemble loop, iteration #$i"
        mdadm -v --stop "$raid_dev"
        udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
        mdadm --assemble "$raid_dev" --name "$raid_name" -v
        udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    done
    helper_check_device_symlinks
    # Cleanup
    mdadm -v --stop "$raid_dev"
    udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"

    echo "Mirror + parity raid (RAID 10) + multiple partitions"
    raid_name="mdmirpar"
    raid_dev="/dev/md/$raid_name"
    part_name="${raid_name}_part"
    uuid="aaaaaaaa:bbbbbbbb:cccccccc:00001010"
    expected_symlinks=(
        "$raid_dev"
        "/dev/disk/by-id/md-name-H:$raid_name"
        "/dev/disk/by-id/md-uuid-$uuid"
        "/dev/disk/by-label/$part_name" # ext4 partition
        # Partitions
        "${raid_dev}1"
        "${raid_dev}2"
        "${raid_dev}3"
        "/dev/disk/by-id/md-name-H:$raid_name-part1"
        "/dev/disk/by-id/md-name-H:$raid_name-part2"
        "/dev/disk/by-id/md-name-H:$raid_name-part3"
        "/dev/disk/by-id/md-uuid-$uuid-part1"
        "/dev/disk/by-id/md-uuid-$uuid-part2"
        "/dev/disk/by-id/md-uuid-$uuid-part3"
    )
    # Create a simple RAID 10 with an ext4 filesystem
    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/ata-foobar_deadbeefmdadm{0..3} -v -f --level=10 --raid-devices=4
    udevadm wait --settle --timeout=30 "$raid_dev"
    # Partition the raid device
    # Here, 'udevadm lock' is meaningless, as udevd does not lock MD devices.
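    # In the sfdisk script below each line after the "label: gpt" header creates
    # one partition: "uuid=" sets its PARTUUID, "name=" its PARTLABEL and "size="
    # its size. The resulting partitions should then appear via the ${raid_dev}N
    # and md-uuid-/md-name-*-partN symlinks listed in expected_symlinks above.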
    sfdisk --wipe=always "$raid_dev" <<EOF
label: gpt

uuid="deadbeef-dead-dead-beef-111111111111", name="mdpart1", size=8M
uuid="deadbeef-dead-dead-beef-222222222222", name="mdpart2", size=32M
uuid="deadbeef-dead-dead-beef-333333333333", name="mdpart3", size=16M
EOF
    udevadm wait --settle --timeout=30 "/dev/disk/by-id/md-uuid-$uuid-part2"
    mkfs.ext4 -L "$part_name" "/dev/disk/by-id/md-uuid-$uuid-part2"
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    for i in {0..9}; do
        echo "Disassemble - reassemble loop, iteration #$i"
        mdadm -v --stop "$raid_dev"
        udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
        mdadm --assemble "$raid_dev" --name "$raid_name" -v
        udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    done
    helper_check_device_symlinks
    # Cleanup
    mdadm -v --stop "$raid_dev"
    # Check if all expected symlinks were removed after the cleanup
    udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
}

testcase_mdadm_lvm() {
    local part_name raid_name raid_dev uuid vgroup
    local expected_symlinks=()
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefmdadmlvm{0..4}
    )

    ls -l "${devices[@]}"

    raid_name="mdlvm"
    raid_dev="/dev/md/$raid_name"
    part_name="${raid_name}_part"
    vgroup="${raid_name}_vg"
    uuid="aaaaaaaa:bbbbbbbb:ffffffff:00001010"
    expected_symlinks=(
        "$raid_dev"
        "/dev/$vgroup/mypart1" # LVM partition
        "/dev/$vgroup/mypart2" # LVM partition
        "/dev/disk/by-id/md-name-H:$raid_name"
        "/dev/disk/by-id/md-uuid-$uuid"
        "/dev/disk/by-label/$part_name" # ext4 partition
    )
    # Create a RAID 10 with LVM + ext4
    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/ata-foobar_deadbeefmdadmlvm{0..3} -v -f --level=10 --raid-devices=4
    udevadm wait --settle --timeout=30 "$raid_dev"
    # Create an LVM on the MD
    lvm pvcreate -y "$raid_dev"
    lvm pvs
    lvm vgcreate "$vgroup" -y "$raid_dev"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    lvm lvs
    udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    mkfs.ext4 -L "$part_name" "/dev/$vgroup/mypart2"
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    # Disassemble the array
    lvm vgchange -an "$vgroup"
    mdadm -v --stop "$raid_dev"
    udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
    helper_check_device_symlinks
    # Reassemble it and check if all required symlinks exist
    mdadm --assemble "$raid_dev" --name "$raid_name" -v
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    helper_check_device_symlinks
    # Cleanup
    lvm vgchange -an "$vgroup"
    mdadm -v --stop "$raid_dev"
    # Check if all expected symlinks were removed after the cleanup
    udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
}

: >/failed

udevadm settle
udevadm control --log-level debug
lsblk -a

echo "Check if all symlinks under /dev/disk/ are valid (pre-test)"
helper_check_device_symlinks

# TEST_FUNCTION_NAME is passed on the kernel command line via systemd.setenv=
# in the respective test.sh file
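# For example (illustrative), the test VM is booted with something like
# "systemd.setenv=TEST_FUNCTION_NAME=testcase_nvme_basic" appended to the kernel
# command line, which makes the variable show up in this script's environment.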
if ! command -v "${TEST_FUNCTION_NAME:?}"; then
    echo >&2 "Missing verification handler for test case '$TEST_FUNCTION_NAME'"
    exit 1
fi

echo "TEST_FUNCTION_NAME=$TEST_FUNCTION_NAME"
"$TEST_FUNCTION_NAME"
udevadm settle

echo "Check if all symlinks under /dev/disk/ are valid (post-test)"
helper_check_device_symlinks

udevadm control --log-level info

systemctl status systemd-udevd

touch /testok
rm /failed