#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
# Copyright (c) 2017 by Tim Chase. All rights reserved.
# Copyright (c) 2017 by Nexenta Systems, Inc. All rights reserved.
# Copyright (c) 2017 Lawrence Livermore National Security, LLC.
# Copyright (c) 2017 Datto Inc.
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
# Use is subject to license terms.
#
. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib
. ${STF_SUITE}/include/blkdev.shlib
#
# Apply constrained path when available. This is required since the
# PATH may have been modified by sudo's secure_path behavior.
#
if [ -n "$STF_PATH" ]; then
PATH="$STF_PATH"
fi
#
# Generic dot version comparison function
#
# Returns success when version $1 is greater than or equal to $2.
#
function compare_version_gte
{
if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then
return 0
else
return 1
fi
}
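#
# Example: a minimal sketch with hypothetical version strings; this
# succeeds because "0.8.0" sorts greater than or equal to "0.7.0":
#
#   compare_version_gte "0.8.0" "0.7.0" && echo "new enough"
#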
# Linux kernel version comparison function
#
# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
#
# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
function linux_version
{
typeset ver="$1"
[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
typeset version=$(echo $ver | cut -d '.' -f 1)
typeset major=$(echo $ver | cut -d '.' -f 2)
typeset minor=$(echo $ver | cut -d '.' -f 3)
[[ -z "$version" ]] && version=0
[[ -z "$major" ]] && major=0
[[ -z "$minor" ]] && minor=0
echo $((version * 10000 + major * 100 + minor))
}
# Determine if this is a Linux test system
#
# Return 0 if the platform is Linux, 1 otherwise
function is_linux
{
if [[ $(uname -o) == "GNU/Linux" ]]; then
return 0
else
return 1
fi
}
# Determine if this is a 32-bit system
#
# Return 0 if the platform is 32-bit, 1 otherwise
function is_32bit
{
if [[ $(getconf LONG_BIT) == "32" ]]; then
return 0
else
return 1
fi
}
# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 otherwise
function is_kmemleak
{
if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
return 0
else
return 1
fi
}
# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
function ismounted
{
typeset fstype=$2
[[ -z $fstype ]] && fstype=zfs
typeset out dir name ret
case $fstype in
zfs)
if [[ "$1" == "/"* ]] ; then
for out in $(zfs mount | awk '{print $2}'); do
[[ $1 == $out ]] && return 0
done
else
for out in $(zfs mount | awk '{print $1}'); do
[[ $1 == $out ]] && return 0
done
fi
;;
ufs|nfs)
out=$(df -F $fstype $1 2>/dev/null)
ret=$?
(($ret != 0)) && return $ret
dir=${out%%\(*}
dir=${dir%% *}
name=${out##*\(}
name=${name%%\)*}
name=${name%% *}
[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
;;
ext*)
out=$(df -t $fstype $1 2>/dev/null)
return $?
;;
zvol)
if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
link=$(readlink -f $ZVOL_DEVDIR/$1)
[[ -n "$link" ]] && \
mount | grep -q "^$link" && \
return 0
fi
;;
esac
return 1
}
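#
# Example (hypothetical dataset): check whether $TESTPOOL/$TESTFS is
# mounted before attempting to unmount it:
#
#   if ismounted $TESTPOOL/$TESTFS; then
#       zfs unmount $TESTPOOL/$TESTFS
#   fi
#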
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function mounted
{
ismounted $1 $2
(($? == 0)) && return 0
return 1
}
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function unmounted
{
ismounted $1 $2
(($? == 1)) && return 0
return 1
}
# split line on ","
#
# $1 - line to split
function splitline
{
echo $1 | sed "s/,/ /g"
}
function default_setup
{
default_setup_noexit "$@"
log_pass
}
function default_setup_no_mountpoint
{
default_setup_noexit "$1" "$2" "$3" "yes"
log_pass
}
#
# Given a list of disks, setup storage pools and datasets.
#
function default_setup_noexit
{
typeset disklist=$1
typeset container=$2
typeset volume=$3
typeset no_mountpoint=$4
log_note begin default_setup_noexit
if is_global_zone; then
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL $disklist
else
reexport_pool
fi
rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
log_must zfs create $TESTPOOL/$TESTFS
if [[ -z $no_mountpoint ]]; then
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
fi
if [[ -n $container ]]; then
rm -rf $TESTDIR1 || \
log_unresolved Could not remove $TESTDIR1
mkdir -p $TESTDIR1 || \
log_unresolved Could not create $TESTDIR1
log_must zfs create $TESTPOOL/$TESTCTR
log_must zfs set canmount=off $TESTPOOL/$TESTCTR
log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
if [[ -z $no_mountpoint ]]; then
log_must zfs set mountpoint=$TESTDIR1 \
$TESTPOOL/$TESTCTR/$TESTFS1
fi
fi
if [[ -n $volume ]]; then
if is_global_zone ; then
log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
block_device_wait
else
log_must zfs create $TESTPOOL/$TESTVOL
fi
fi
}
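#
# Example sketch (assumes $DISKS is provided by the test framework):
# create the default pool, filesystem, container and volume in one call:
#
#   default_setup_noexit "$DISKS" "true" "true"
#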
#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
typeset disklist=$1
default_setup "$disklist" "true"
}
#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "" "true"
}
#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "true" "true"
}
#
# Create a snapshot on a filesystem or volume. By default, create a
# snapshot on the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
typeset snap=${2:-$TESTSNAP}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
if snapexists $fs_vol@$snap; then
log_fail "$fs_vol@$snap already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
log_must zfs snapshot $fs_vol@$snap
}
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
typeset clone=${2:-$TESTPOOL/$TESTCLONE}
[[ -z $snap ]] && \
log_fail "Snapshot name is undefined."
[[ -z $clone ]] && \
log_fail "Clone name is undefined."
log_must zfs clone $snap $clone
}
#
# Create a bookmark of the given snapshot. By default, create a bookmark
# on the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
typeset fs_vol=${1:-$TESTFS}
typeset snap=${2:-$TESTSNAP}
typeset bkmark=${3:-$TESTBKMARK}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
if bkmarkexists $fs_vol#$bkmark; then
log_fail "$fs_vol#$bkmark already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
snapexists $fs_vol@$snap || \
log_fail "$fs_vol@$snap must exist."
log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
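#
# Example sketch (hypothetical second snapshot name "snap2"): bookmark an
# existing snapshot so it can later serve as an incremental send source:
#
#   create_bookmark $TESTPOOL/$TESTFS $TESTSNAP $TESTBKMARK
#   zfs send -i $TESTPOOL/$TESTFS#$TESTBKMARK $TESTPOOL/$TESTFS@snap2
#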
#
# Create a temporary clone result of an interrupted resumable 'zfs receive'
# $1 Destination filesystem name. Must not exist, will be created as the result
# of this function along with its %recv temporary clone
# $2 Source filesystem name. Must not exist, will be created and destroyed
#
function create_recv_clone
{
typeset recvfs="$1"
typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
typeset snap="$sendfs@snap1"
typeset incr="$sendfs@snap2"
typeset mountpoint="$TESTDIR/create_recv_clone"
typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
datasetexists $recvfs && log_fail "Recv filesystem must not exist."
datasetexists $sendfs && log_fail "Send filesystem must not exist."
log_must zfs create -o mountpoint="$mountpoint" $sendfs
log_must zfs snapshot $snap
log_must eval "zfs send $snap | zfs recv -u $recvfs"
log_must mkfile 1m "$mountpoint/data"
log_must zfs snapshot $incr
log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 > $sendfile"
log_mustnot eval "zfs recv -su $recvfs < $sendfile"
destroy_dataset "$sendfs" "-r"
log_must rm -f "$sendfile"
if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
log_fail "Error creating temporary $recvfs/%recv clone"
fi
}
function default_mirror_setup
{
default_mirror_setup_noexit $1 $2 $3
log_pass
}
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
# $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
readonly func="default_mirror_setup_noexit"
typeset primary=$1
typeset secondary=$2
[[ -z $primary ]] && \
log_fail "$func: No parameters passed"
[[ -z $secondary ]] && \
log_fail "$func: No secondary partition passed"
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL mirror $@
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# Create a number of mirrors.
# We create $1 two-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted
# @parameters: $1 the number of mirrors to create
# $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
typeset -i nmirrors=$1
shift
while ((nmirrors > 0)); do
log_must test -n "$1" -a -n "$2"
[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
shift 2
((nmirrors = nmirrors - 1))
done
}
#
# Create a number of raidz pools.
# We create $1 raidz pools, each using a pair of disks named
# on the command line. These pools are *not* mounted
# @parameters: $1 the number of pools to create
# $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
typeset -i nraidzs=$1
shift
while ((nraidzs > 0)); do
log_must test -n "$1" -a -n "$2"
[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
shift 2
((nraidzs = nraidzs - 1))
done
}
#
# Destroy the configured testpool mirrors.
# The mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
default_cleanup_noexit
log_pass
}
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
typeset disklist="$*"
disks=(${disklist[*]})
if [[ ${#disks[*]} -lt 2 ]]; then
log_fail "A raid-z requires a minimum of two disks."
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL raidz $disklist
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
log_pass
}
#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.
function default_cleanup
{
default_cleanup_noexit
log_pass
}
#
# Utility function used to list all available pool names.
#
# NOTE: $KEEP is a variable containing pool names, separated by a newline
# character, that must be excluded from the returned list.
#
function get_all_pools
{
zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
}
function default_cleanup_noexit
{
typeset pool=""
#
# Destroying the pool will also destroy any
# filesystems it contains.
#
if is_global_zone; then
zfs unmount -a > /dev/null 2>&1
ALL_POOLS=$(get_all_pools)
# Here, we loop through the pools we're allowed to
# destroy, only destroying them if it's safe to do
# so.
while [ -n "${ALL_POOLS}" ]
do
for pool in ${ALL_POOLS}
do
if safe_to_destroy_pool $pool ;
then
destroy_pool $pool
fi
ALL_POOLS=$(get_all_pools)
done
done
zfs mount -a
else
typeset fs=""
for fs in $(zfs list -H -o name \
| grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
destroy_dataset "$fs" "-Rf"
done
# Clean up here to avoid leaving garbage directories behind.
for fs in $(zfs list -H -o name); do
[[ $fs == /$ZONE_POOL ]] && continue
[[ -d $fs ]] && log_must rm -rf $fs/*
done
#
# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
# the default value
#
for fs in $(zfs list -H -o name); do
if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
log_must zfs set reservation=none $fs
log_must zfs set recordsize=128K $fs
log_must zfs set mountpoint=/$fs $fs
typeset enc=""
enc=$(get_prop encryption $fs)
if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
[[ "$enc" == "off" ]]; then
log_must zfs set checksum=on $fs
fi
log_must zfs set compression=off $fs
log_must zfs set atime=on $fs
log_must zfs set devices=off $fs
log_must zfs set exec=on $fs
log_must zfs set setuid=on $fs
log_must zfs set readonly=off $fs
log_must zfs set snapdir=hidden $fs
log_must zfs set aclmode=groupmask $fs
log_must zfs set aclinherit=secure $fs
fi
done
fi
[[ -d $TESTDIR ]] && \
log_must rm -rf $TESTDIR
disk1=${DISKS%% *}
if is_mpath_device $disk1; then
delete_partitions
fi
rm -f $TEST_BASE_DIR/{err,out}
}
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
if ! is_global_zone; then
reexport_pool
fi
ismounted $TESTPOOL/$TESTCTR/$TESTFS1
[[ $? -eq 0 ]] && \
log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
[[ -e $TESTDIR1 ]] && \
log_must rm -rf $TESTDIR1 > /dev/null 2>&1
default_cleanup
}
#
# Common function used to cleanup snapshot of file system or volume. Default to
# delete the file system's snapshot
#
# $1 snapshot name
#
function destroy_snapshot
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if ! snapexists $snap; then
log_fail "'$snap' does not exist."
fi
#
# The value returned by 'get_prop' may not match the real mountpoint
# when the snapshot is unmounted. So first check that the snapshot is
# mounted on the current system before querying its mountpoint.
#
typeset mtpt=""
if ismounted $snap; then
mtpt=$(get_prop mountpoint $snap)
(($? != 0)) && \
log_fail "get_prop mountpoint $snap failed."
fi
destroy_dataset "$snap"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to cleanup clone.
#
# $1 clone name
#
function destroy_clone
{
typeset clone=${1:-$TESTPOOL/$TESTCLONE}
if ! datasetexists $clone; then
log_fail "'$clone' does not existed."
fi
# For the same reason as in destroy_snapshot
typeset mtpt=""
if ismounted $clone; then
mtpt=$(get_prop mountpoint $clone)
(($? != 0)) && \
log_fail "get_prop mountpoint $clone failed."
fi
destroy_dataset "$clone"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to cleanup bookmark of file system or volume. Default
# to delete the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
if ! bkmarkexists $bkmark; then
log_fail "'$bkmarkp' does not existed."
fi
destroy_dataset "$bkmark"
}
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name
function snapexists
{
zfs list -H -t snapshot "$1" > /dev/null 2>&1
return $?
}
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
zfs list -H -t bookmark "$1" > /dev/null 2>&1
return $?
}
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
# $1 dataset whose property is being set
# $2 property to set
# $3 value to set property to
# @return:
# 0 if the property could be set.
# non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
typeset fn=dataset_setprop
if (($# < 3)); then
log_note "$fn: Insufficient parameters (need 3, had $#)"
return 1
fi
typeset output=
output=$(zfs set $2=$3 $1 2>&1)
typeset rv=$?
if ((rv != 0)); then
log_note "Setting property on $1 failed."
log_note "property $2=$3"
log_note "Return Code: $rv"
log_note "Output: $output"
return $rv
fi
return 0
}
#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
# 0 if the dataset has been altered.
# 1 if no pool name was passed in.
# 2 if the dataset could not be found.
# 3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
typeset dataset="$1"
[[ -z $dataset ]] && return 1
typeset confset=
typeset -i found=0
for confset in $(zfs list); do
if [[ $dataset = $confset ]]; then
found=1
break
fi
done
[[ $found -eq 0 ]] && return 2
if [[ -n $COMPRESSION_PROP ]]; then
dataset_setprop $dataset compression $COMPRESSION_PROP || \
return 3
log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
fi
if [[ -n $CHECKSUM_PROP ]]; then
dataset_setprop $dataset checksum $CHECKSUM_PROP || \
return 3
log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
fi
return 0
}
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
(($@)) || log_fail "$@"
}
#
# Function to format the partitions of a disk.
# Given a disk cxtxdx, reduce all of its partitions
# to size 0
#
function zero_partitions #<whole_disk_name>
{
typeset diskname=$1
typeset i
if is_linux; then
DSK=$DEV_DSKDIR/$diskname
DSK=$(echo $DSK | sed -e "s|//|/|g")
log_must parted $DSK -s -- mklabel gpt
blockdev --rereadpt $DSK 2>/dev/null
block_device_wait
else
for i in 0 1 3 4 5 6 7
do
log_must set_partition $i "" 0mb $diskname
done
fi
return 0
}
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utility
# which requires the end of the partition to be specified. It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
typeset -i slicenum=$1
typeset start=$2
typeset size=$3
typeset disk=$4
if is_linux; then
if [[ -z $size || -z $disk ]]; then
log_fail "The size or disk name is unspecified."
fi
typeset size_mb=${size%%[mMgG]}
size_mb=${size_mb%%[mMgG][bB]}
if [[ $size == *[gG]* ]]; then
((size_mb = size_mb * 1024))
fi
# Create GPT partition table when setting slice 0 or
# when the device doesn't already contain a GPT label.
parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
typeset ret_val=$?
if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
parted $DEV_DSKDIR/$disk -s -- mklabel gpt
if [[ $? -ne 0 ]]; then
log_note "Failed to create GPT partition table on $disk"
return 1
fi
fi
# When no start is given align on the first cylinder.
if [[ -z "$start" ]]; then
start=1
fi
# Determine the cylinder size for the device and use it to
# calculate the end offset in cylinders.
typeset -i cly_size_kb=0
cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
unit cyl print | head -3 | tail -1 | \
awk -F '[:k.]' '{print $4}')
((end = (size_mb * 1024 / cly_size_kb) + start))
parted $DEV_DSKDIR/$disk -s -- \
mkpart part$slicenum ${start}cyl ${end}cyl
if [[ $? -ne 0 ]]; then
log_note "Failed to create partition $slicenum on $disk"
return 1
fi
blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
block_device_wait
else
if [[ -z $slicenum || -z $size || -z $disk ]]; then
log_fail "The slice, size or disk name is unspecified."
fi
typeset format_file=/var/tmp/format_in.$$
echo "partition" >$format_file
echo "$slicenum" >> $format_file
echo "" >> $format_file
echo "" >> $format_file
echo "$start" >> $format_file
echo "$size" >> $format_file
echo "label" >> $format_file
echo "" >> $format_file
echo "q" >> $format_file
echo "q" >> $format_file
format -e -s -d $disk -f $format_file
fi
typeset ret_val=$?
rm -f $format_file
if [[ $ret_val -ne 0 ]]; then
log_note "Unable to format $disk slice $slicenum to $size"
return 1
fi
return 0
}
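#
# Example (hypothetical disk name "sdb"): create a 100mb partition in
# slice 0, letting the function pick the starting cylinder:
#
#   log_must set_partition 0 "" 100mb sdb
#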
#
# Delete all partitions on all disks. This is specifically for multipath
# devices, which currently can only be used in the test suite as
# raw/un-partitioned devices (i.e. a zpool cannot be created on a whole
# mpath device that has partitions)
#
function delete_partitions
{
typeset -i j=1
if [[ -z $DISK_ARRAY_NUM ]]; then
DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
fi
if [[ -z $DISKSARRAY ]]; then
DISKSARRAY=$DISKS
fi
if is_linux; then
if (( $DISK_ARRAY_NUM == 1 )); then
while ((j < MAX_PARTITIONS)); do
parted $DEV_DSKDIR/$DISK -s rm $j \
> /dev/null 2>&1
if (( $? == 1 )); then
lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
if (( $? == 1 )); then
log_note "Partitions for $DISK should be deleted"
else
log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
fi
return 0
else
lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
if (( $? == 0 )); then
log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
fi
fi
((j = j+1))
done
else
for disk in `echo $DISKSARRAY`; do
while ((j < MAX_PARTITIONS)); do
parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
if (( $? == 1 )); then
lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
if (( $? == 1 )); then
log_note "Partitions for $disk should be deleted"
else
log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
fi
j=7
else
lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
if (( $? == 0 )); then
log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
fi
fi
((j = j+1))
done
j=1
done
fi
fi
return 0
}
#
# Get the end cylinder of the given slice
#
function get_endslice #<disk> <slice>
{
typeset disk=$1
typeset slice=$2
if [[ -z $disk || -z $slice ]] ; then
log_fail "The disk name or slice number is unspecified."
fi
if is_linux; then
endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
grep "part${slice}" | \
awk '{print $3}' | \
sed 's,cyl,,')
((endcyl = (endcyl + 1)))
else
disk=${disk#/dev/dsk/}
disk=${disk#/dev/rdsk/}
disk=${disk%s*}
typeset -i ratio=0
ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
grep "sectors\/cylinder" | \
awk '{print $2}')
if ((ratio == 0)); then
return
fi
typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
nawk -v token="$slice" '{if ($1==token) print $6}')
((endcyl = (endcyl + 1) / ratio))
fi
echo $endcyl
}
#
# Given a size, disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
typeset -i i=0
typeset slice_size=$1
typeset disk_name=$2
typeset total_slices=$3
typeset cyl
zero_partitions $disk_name
while ((i < $total_slices)); do
if ! is_linux; then
if ((i == 2)); then
((i = i + 1))
continue
fi
fi
log_must set_partition $i "$cyl" $slice_size $disk_name
cyl=$(get_endslice $disk_name $i)
((i = i+1))
done
}
#
# This function continues to write filenum files into each of dirnum
# directories until either file_write returns an error or the maximum
# number of files per directory has been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
# non 0 on error
#
# Where :
# destdir: is the directory where everything is to be created under
# dirnum: the maximum number of subdirectories to use, -1 no limit
# filenum: the maximum number of files per subdirectory
# bytes: number of bytes to write
# num_writes: number of times to write out bytes
# data: the data that will be written
#
# E.g.
# fill_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
typeset destdir=${1:-$TESTDIR}
typeset -i dirnum=${2:-50}
typeset -i filenum=${3:-50}
typeset -i bytes=${4:-8192}
typeset -i num_writes=${5:-10240}
typeset data=${6:-0}
typeset -i odirnum=1
typeset -i idirnum=0
typeset -i fn=0
typeset -i retval=0
mkdir -p $destdir/$idirnum
while (($odirnum > 0)); do
if ((dirnum >= 0 && idirnum >= dirnum)); then
odirnum=0
break
fi
file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
-b $bytes -c $num_writes -d $data
retval=$?
if (($retval != 0)); then
odirnum=0
break
fi
if (($fn >= $filenum)); then
fn=0
((idirnum = idirnum + 1))
mkdir -p $destdir/$idirnum
else
((fn = fn + 1))
fi
done
return $retval
}
#
# Simple function to get the specified property. If unable to
# get the property then return 1.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
typeset prop_val
typeset prop=$1
typeset dataset=$2
prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
if [[ $? -ne 0 ]]; then
log_note "Unable to get $prop property for dataset " \
"$dataset"
return 1
fi
echo "$prop_val"
return 0
}
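#
# Example (hypothetical dataset): fetch the parsable recordsize and
# fail the test if it cannot be read:
#
#   typeset rs
#   rs=$(get_prop recordsize $TESTPOOL/$TESTFS) || log_fail "no recordsize"
#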
#
# Simple function to get the specified property of a pool. If unable to
# get the property then return 1.
#
# Note property is in 'parsable' format (-p)
#
function get_pool_prop # property pool
{
typeset prop_val
typeset prop=$1
typeset pool=$2
if poolexists $pool ; then
prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
awk '{print $3}')
if [[ $? -ne 0 ]]; then
log_note "Unable to get $prop property for pool " \
"$pool"
return 1
fi
else
log_note "Pool $pool not exists."
return 1
fi
echo "$prop_val"
return 0
}
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name
function poolexists
{
typeset pool=$1
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
zpool get name "$pool" > /dev/null 2>&1
return $?
}
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n dataset name
function datasetexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
while (($# > 0)); do
zfs get name $1 > /dev/null 2>&1 || \
return $?
shift
done
return 0
}
# Return 0 if none of the specified datasets exist; otherwise return 1.
#
# $1-n dataset name
function datasetnonexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
while (($# > 0)); do
zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
&& return 1
shift
done
return 0
}
function is_shared_impl
{
typeset fs=$1
typeset mtpt
if is_linux; then
for mtpt in `share | awk '{print $1}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
return 1
fi
for mtpt in `share | awk '{print $2}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
typeset stat=$(svcs -H -o STA nfs/server:default)
if [[ $stat != "ON" ]]; then
log_note "Current nfs/server status: $stat"
fi
return 1
}
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
typeset fs=$1
typeset mtpt
if [[ $fs != "/"* ]] ; then
if datasetnonexists "$fs" ; then
return 1
else
mtpt=$(get_prop mountpoint "$fs")
case $mtpt in
none|legacy|-) return 1
;;
*) fs=$mtpt
;;
esac
fi
fi
is_shared_impl "$fs"
}
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
typeset fs=$1
typeset mtpt
if datasetnonexists "$fs" ; then
return 1
else
fs=$(echo $fs | sed 's@/@_@g')
fi
if is_linux; then
for mtpt in `net usershare list | awk '{print $1}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
return 1
else
log_unsupported "Currently unsupported by the test framework"
return 1
fi
}
#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
typeset fs=$1
is_shared $fs
if (($? == 0)); then
return 1
fi
return 0
}
#
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
typeset fs=$1
is_shared_smb $fs
if (($? == 0)); then
return 1
fi
return 0
}
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
typeset fs=$1
is_shared $fs || is_shared_smb $fs
if (($? == 0)); then
log_must zfs unshare $fs
fi
return 0
}
#
# Helper function to share a NFS mountpoint.
#
function share_nfs #fs
{
typeset fs=$1
if is_linux; then
is_shared $fs
if (($? != 0)); then
log_must share "*:$fs"
fi
else
is_shared $fs
if (($? != 0)); then
log_must share -F nfs $fs
fi
fi
return 0
}
#
# Helper function to unshare a NFS mountpoint.
#
function unshare_nfs #fs
{
typeset fs=$1
if is_linux; then
is_shared $fs
if (($? == 0)); then
log_must unshare -u "*:$fs"
fi
else
is_shared $fs
if (($? == 0)); then
log_must unshare -F nfs $fs
fi
fi
return 0
}
#
# Helper function to show NFS shares.
#
function showshares_nfs
{
if is_linux; then
share -v
else
share -F nfs
fi
return 0
}
#
# Helper function to show SMB shares.
#
function showshares_smb
{
if is_linux; then
net usershare list
else
share -F smb
fi
return 0
}
#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
# Cannot share directory in non-global zone.
#
if ! is_global_zone; then
log_note "Cannot trigger NFS server by sharing in LZ."
return
fi
if is_linux; then
#
# Re-synchronize /var/lib/nfs/etab with /etc/exports and
# /etc/exports.d/* to provide a clean test environment.
#
log_must share -r
log_note "NFS server must be started prior to running ZTS."
return
fi
typeset nfs_fmri="svc:/network/nfs/server:default"
if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
#
# Only an actual share operation can bring the NFS server
# online permanently.
#
typeset dummy=/tmp/dummy
if [[ -d $dummy ]]; then
log_must rm -rf $dummy
fi
log_must mkdir $dummy
log_must share $dummy
#
# Wait for the fmri's status to reach its final state. While in
# transition an asterisk (*) is appended to the instance state,
# and an unshare would revert the status to 'DIS' again.
#
# Wait for at least 1 second.
#
log_must sleep 1
timeout=10
while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
do
log_must sleep 1
((timeout -= 1))
done
log_must unshare $dummy
log_must rm -rf $dummy
fi
log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
if is_linux; then
return 0
else
typeset cur_zone=$(zonename 2>/dev/null)
if [[ $cur_zone != "global" ]]; then
return 1
fi
return 0
fi
}
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
typeset limit=$1
[[ -z $limit ]] && return 0
if is_global_zone ; then
case $limit in
global|both)
;;
local) log_unsupported "Test is unable to run from "\
"global zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
else
case $limit in
local|both)
;;
global) log_unsupported "Test is unable to run from "\
"local zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
reexport_pool
fi
return 0
}
# Return 0 if created successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
function create_pool #pool devs_list
{
typeset pool=${1%%/*}
shift
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 1
fi
if poolexists $pool ; then
destroy_pool $pool
fi
if is_global_zone ; then
[[ -d /$pool ]] && rm -rf /$pool
log_must zpool create -f $pool $@
fi
return 0
}
# Return 0 if the pool is destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
function destroy_pool #pool
{
typeset pool=${1%%/*}
typeset mtpt
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
if is_global_zone ; then
if poolexists "$pool" ; then
mtpt=$(get_prop mountpoint "$pool")
# At times, syseventd/udev activity can cause attempts
# to destroy a pool to fail with EBUSY. We retry a few
# times allowing failures before requiring the destroy
# to succeed.
log_must_busy zpool destroy -f $pool
[[ -d $mtpt ]] && \
log_must rm -rf $mtpt
else
log_note "Pool does not exist. ($pool)"
return 1
fi
fi
return 0
}
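#
# Example sketch (hypothetical disk variables $DISK1/$DISK2): recreate a
# pool from scratch; create_pool() destroys any existing pool of the
# same name first:
#
#   create_pool $TESTPOOL mirror $DISK1 $DISK2
#   ...
#   destroy_pool $TESTPOOL
#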
# Return 0 if created successfully; $? otherwise
#
# $1 - dataset name
# $2-n - dataset options
function create_dataset #dataset dataset_options
{
typeset dataset=$1
shift
if [[ -z $dataset ]]; then
log_note "Missing dataset name."
return 1
fi
if datasetexists $dataset ; then
destroy_dataset $dataset
fi
log_must zfs create $@ $dataset
return 0
}
# Return 0 if the dataset is destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.
function destroy_dataset #dataset #args
{
typeset dataset=$1
typeset mtpt
typeset args=${2:-""}
if [[ -z $dataset ]]; then
log_note "No dataset name given."
return 1
fi
if is_global_zone ; then
if datasetexists "$dataset" ; then
mtpt=$(get_prop mountpoint "$dataset")
log_must_busy zfs destroy $args $dataset
[[ -d $mtpt ]] && \
log_must rm -rf $mtpt
else
log_note "Dataset does not exist. ($dataset)"
return 1
fi
fi
return 0
}
#
# First, create a pool with 5 datasets. Then create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
typeset zone_name=${1:-$(hostname)-z}
typeset zone_root=${2:-"/zone_root"}
typeset zone_ip=${3:-"10.1.1.10"}
typeset prefix_ctr=$ZONE_CTR
typeset pool_name=$ZONE_POOL
typeset -i cntctr=5
typeset -i i=0
# Create a pool and 5 containers within it
#
[[ -d /$pool_name ]] && rm -rf /$pool_name
log_must zpool create -f $pool_name $DISKS
while ((i < cntctr)); do
log_must zfs create $pool_name/$prefix_ctr$i
((i += 1))
done
# create a zvol
log_must zfs create -V 1g $pool_name/zone_zvol
block_device_wait
#
# If the current system supports slog, add a slog device to the pool
#
if verify_slog_support ; then
typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
log_must mkfile $MINVDEVSIZE $sdevs
log_must zpool add $pool_name log mirror $sdevs
fi
# this isn't supported just yet.
# Create a filesystem. In order to add this to
# the zone, it must have its mountpoint set to 'legacy'
# log_must zfs create $pool_name/zfs_filesystem
# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
[[ -d $zone_root ]] && \
log_must rm -rf $zone_root/$zone_name
[[ ! -d $zone_root ]] && \
log_must mkdir -p -m 0700 $zone_root/$zone_name
# Create the zone config file and configure the zone
#
typeset zone_conf=/tmp/zone_conf.$$
echo "create" > $zone_conf
echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
echo "set autoboot=true" >> $zone_conf
i=0
while ((i < cntctr)); do
echo "add dataset" >> $zone_conf
echo "set name=$pool_name/$prefix_ctr$i" >> \
$zone_conf
echo "end" >> $zone_conf
((i += 1))
done
# add our zvol to the zone
echo "add device" >> $zone_conf
echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
echo "end" >> $zone_conf
# add a corresponding zvol rdsk to the zone
echo "add device" >> $zone_conf
echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
echo "end" >> $zone_conf
# once it's supported, we'll add our filesystem to the zone
# echo "add fs" >> $zone_conf
# echo "set type=zfs" >> $zone_conf
# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
# echo "set dir=/export/zfs_filesystem" >> $zone_conf
# echo "end" >> $zone_conf
echo "verify" >> $zone_conf
echo "commit" >> $zone_conf
log_must zonecfg -z $zone_name -f $zone_conf
log_must rm -f $zone_conf
# Install the zone
zoneadm -z $zone_name install
if (($? == 0)); then
log_note "SUCCESS: zoneadm -z $zone_name install"
else
log_fail "FAIL: zoneadm -z $zone_name install"
fi
# Install sysidcfg file
#
typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
echo "system_locale=C" > $sysidcfg
echo "terminal=dtterm" >> $sysidcfg
echo "network_interface=primary {" >> $sysidcfg
echo "hostname=$zone_name" >> $sysidcfg
echo "}" >> $sysidcfg
echo "name_service=NONE" >> $sysidcfg
echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
echo "security_policy=NONE" >> $sysidcfg
echo "timezone=US/Eastern" >> $sysidcfg
# Boot this zone
log_must zoneadm -z $zone_name boot
}
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
typeset -i cntctr=5
typeset -i i=0
while ((i < cntctr)); do
if ((i == 0)); then
TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
if ! ismounted $TESTPOOL; then
log_must zfs mount $TESTPOOL
fi
else
eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
if eval ! ismounted \$TESTPOOL$i; then
log_must eval zfs mount \$TESTPOOL$i
fi
fi
((i += 1))
done
}
#
# Verify a given disk or pool state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
[[ -z $pool ]] || [[ -z $state ]] \
&& log_fail "Arguments invalid or missing"
if [[ -z $disk ]]; then
#check pool state only
zpool get -H -o value health $pool \
| grep -i "$state" > /dev/null 2>&1
else
zpool status -v $pool | grep "$disk" \
| grep -i "$state" > /dev/null 2>&1
fi
return $?
}
#
# Get the mountpoint of a snapshot
# A snapshot uses <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if [[ $dataset != *@* ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
typeset fs=${dataset%@*}
typeset snap=${dataset#*@}
if [[ -z $fs || -z $snap ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
# Given a device and 'ashift' value verify it's correctly set on every label
#
function verify_ashift # device ashift
{
typeset device="$1"
typeset ashift="$2"
zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
if (ashift != $2)
exit 1;
else
count++;
} END {
if (count != 4)
exit 1;
else
exit 0;
}'
return $?
}
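#
# Example (hypothetical device): verify that all four vdev labels on
# /dev/sda record an ashift of 12:
#
#   log_must verify_ashift /dev/sda 12
#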
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
typeset pool="$1"
typeset filesys="$2"
typeset zdbout="/tmp/zdbout.$$"
shift
shift
typeset dirs=$@
typeset search_path=""
log_note "Calling zdb to verify filesystem '$filesys'"
zfs unmount -a > /dev/null 2>&1
log_must zpool export $pool
if [[ -n $dirs ]] ; then
for dir in $dirs ; do
search_path="$search_path -d $dir"
done
fi
log_must zpool import $search_path $pool
zdb -cudi $filesys > $zdbout 2>&1
if [[ $? != 0 ]]; then
log_note "Output: zdb -cudi $filesys"
cat $zdbout
log_fail "zdb detected errors with: '$filesys'"
fi
log_must zfs mount -a
log_must rm -rf $zdbout
}
#
# Given a pool issue a scrub and verify that no checksum errors are reported.
#
function verify_pool
{
typeset pool=${1:-$TESTPOOL}
log_must zpool scrub $pool
log_must wait_scrubbed $pool
cksum=$(zpool status $pool | awk 'L{print $NF;L=0} /CKSUM$/{L=1}')
if [[ $cksum != 0 ]]; then
log_must zpool status -v
log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
fi
}
#
# Given a pool, list all disks in the pool
#
function get_disklist # pool
{
typeset disklist=""
disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
grep -v "\-\-\-\-\-" | \
egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$")
echo $disklist
}
#
# Given a pool, list all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
args="-P $1"
get_disklist $args
}
# /**
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want: we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
typeset -i TIMEOUT=$1
shift
typeset cpids="$@"
log_note "Waiting for child processes($cpids). " \
"It could last dozens of minutes, please be patient ..."
log_must sleep $TIMEOUT
log_note "Killing child processes after ${TIMEOUT} stress timeout."
typeset pid
for pid in $cpids; do
ps -p $pid > /dev/null 2>&1
if (($? == 0)); then
log_must kill -USR1 $pid
fi
done
}
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk "spares")
if [[ $state != ${cur_state} ]]; then
return 1
fi
return 0
}
#
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_hotspare_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
#
# Verify a given slog disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk "logs")
if [[ $state != ${cur_state} ]]; then
return 1
fi
return 0
}
#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk)
if [[ $state != ${cur_state} ]]; then
return 1
fi
return 0
}
#
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_vdev_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
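#
# Example sketch (hypothetical disk variable $DISK1): after onlining a
# device, wait up to 60 seconds for it to report ONLINE:
#
#   log_must zpool online $TESTPOOL $DISK1
#   log_must wait_vdev_state $TESTPOOL $DISK1 "ONLINE" 60
#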
#
# Check the output of 'zpool status -v <pool>' to see if the content
# of <token> contains the specified <keyword>.
#
# Return 0 if it does, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
typeset pool=$1
typeset token=$2
typeset keyword=$3
typeset verbose=${4:-false}
scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
($1==token) {print $0}')
if [[ $verbose == true ]]; then
log_note $scan
fi
echo $scan | grep -i "$keyword" > /dev/null 2>&1
return $?
}
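#
# Example (hypothetical pool): test whether a scrub has completed,
# printing the matched status line for debugging:
#
#   check_pool_status $TESTPOOL "scan" "scrub repaired" true
#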
#
# The following functions are instances of check_pool_status()
# is_pool_resilvering - to check if the pool is resilver in progress
# is_pool_resilvered - to check if the pool is resilver completed
# is_pool_scrubbing - to check if the pool is scrub in progress
# is_pool_scrubbed - to check if the pool is scrub completed
# is_pool_scrub_stopped - to check if the pool is scrub stopped
# is_pool_scrub_paused - to check if the pool has scrub paused
# is_pool_removing - to check if the pool is removing a vdev
# is_pool_removed - to check if the pool is remove completed
#
function is_pool_resilvering #pool <verbose>
{
check_pool_status "$1" "scan" "resilver in progress since " $2
return $?
}
function is_pool_resilvered #pool <verbose>
{
check_pool_status "$1" "scan" "resilvered " $2
return $?
}
function is_pool_scrubbing #pool <verbose>
{
check_pool_status "$1" "scan" "scrub in progress since " $2
return $?
}
function is_pool_scrubbed #pool <verbose>
{
check_pool_status "$1" "scan" "scrub repaired" $2
return $?
}
function is_pool_scrub_stopped #pool <verbose>
{
check_pool_status "$1" "scan" "scrub canceled" $2
return $?
}
function is_pool_scrub_paused #pool <verbose>
{
check_pool_status "$1" "scan" "scrub paused since " $2
return $?
}
function is_pool_removing #pool
{
check_pool_status "$1" "remove" "in progress since "
return $?
}
function is_pool_removed #pool
{
check_pool_status "$1" "remove" "completed on"
return $?
}
function wait_for_degraded
{
typeset pool=$1
typeset timeout=${2:-30}
typeset t0=$SECONDS
while :; do
[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
log_note "$pool is not yet degraded."
sleep 1
if ((SECONDS - t0 > $timeout)); then
log_note "$pool not degraded after $timeout seconds."
return 1
fi
done
return 0
}
#
# Use create_pool()/destroy_pool() to clean up the information on
# the given disks to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
typeset pool="foopool$$"
if poolexists $pool ; then
destroy_pool $pool
fi
create_pool $pool $@
destroy_pool $pool
return 0
}
#/**
# A function to find and locate free disks on a system or from given
# disks as the parameter. It works by locating disks that are in use
# as swap devices and dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#*/
function find_disks
{
# Trust provided list, no attempt is made to locate unused devices.
if is_linux; then
echo "$@"
return
fi
sfi=/tmp/swaplist.$$
dmpi=/tmp/dumpdev.$$
max_finddisksnum=${MAX_FINDDISKSNUM:-6}
swap -l > $sfi
dumpadm > $dmpi 2>/dev/null
# write an awk script that can process the output of format
# to produce a list of disks we know about. Note that we have
# to escape "$2" so that the shell doesn't interpret it while
# we're creating the awk script.
# -------------------
cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
BEGIN { FS="."; }
/^Specify disk/{
searchdisks=0;
}
{
if (searchdisks && \$2 !~ "^$"){
split(\$2,arr," ");
print arr[1];
}
}
/^AVAILABLE DISK SELECTIONS:/{
searchdisks=1;
}
EOF
#---------------------
chmod 755 /tmp/find_disks.awk
disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
rm /tmp/find_disks.awk
unused=""
for disk in $disks; do
# Check for mounted
grep "${disk}[sp]" /etc/mnttab >/dev/null
(($? == 0)) && continue
# Check for swap
grep "${disk}[sp]" $sfi >/dev/null
(($? == 0)) && continue
# check for dump device
grep "${disk}[sp]" $dmpi >/dev/null
(($? == 0)) && continue
# check to see if this disk hasn't been explicitly excluded
# by a user-set environment variable
echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
(($? == 0)) && continue
unused_candidates="$unused_candidates $disk"
done
rm $sfi
rm $dmpi
# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. limit the number to max_finddisksnum
count=0
for disk in $unused_candidates; do
if [ -b $DEV_DSKDIR/${disk}s0 ]; then
if [ $count -lt $max_finddisksnum ]; then
unused="$unused $disk"
# do not impose limit if $@ is provided
[[ -z $@ ]] && ((count = count + 1))
fi
fi
done
# finally, return our disk list
echo $unused
}
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
typeset gname=$1
typeset uname=$2
typeset basedir=${3:-"/var/tmp"}
if ((${#gname} == 0 || ${#uname} == 0)); then
log_fail "group name or user name are not defined."
fi
log_must useradd -g $gname -d $basedir/$uname -m $uname
echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.profile
echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.bash_profile
echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.login
# Add new users to the same group and the command line utils.
# This allows them to be run out of the original users home
# directory as long as it is permissioned to be group readable.
if is_linux; then
cmd_group=$(stat --format="%G" $(which zfs))
log_must usermod -a -G $cmd_group $uname
fi
return 0
}
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
typeset user=$1
typeset basedir=${2:-"/var/tmp"}
if ((${#user} == 0)); then
log_fail "login name is necessary."
fi
if id $user > /dev/null 2>&1; then
log_must_retry "currently used" 5 userdel $user
fi
[[ -d $basedir/$user ]] && rm -fr $basedir/$user
return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
typeset group=$1
if ((${#group} == 0)); then
log_fail "group name is necessary."
fi
# On illumos assign 100 as the base gid. On Linux let groupadd pick
# the gid, since for many distributions 1000 and under are reserved.
if is_linux; then
while true; do
groupadd $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
0) return 0 ;;
*) return 1 ;;
esac
done
else
typeset -i gid=100
while true; do
groupadd -g $gid $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
0) return 0 ;;
# The gid is not unique
4) ((gid += 1)) ;;
*) return 1 ;;
esac
done
fi
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
typeset grp=$1
if ((${#grp} == 0)); then
log_fail "group name is necessary."
fi
if is_linux; then
getent group $grp > /dev/null 2>&1
typeset -i ret=$?
case $ret in
# Group does not exist.
2) return 0 ;;
# Name already exists as a group name
0) log_must groupdel $grp ;;
*) return 1 ;;
esac
else
groupmod -n $grp $grp > /dev/null 2>&1
typeset -i ret=$?
case $ret in
# Group does not exist.
6) return 0 ;;
# Name already exists as a group name
9) log_must groupdel $grp ;;
*) return 1 ;;
esac
fi
return 0
}
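#
# Example sketch (hypothetical names): create a group and a user in it
# for permission tests, then tear both down:
#
#   add_group zgrp
#   add_user zgrp zusr
#   ...
#   del_user zusr
#   del_group zgrp
#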
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name
typeset pool=""
typeset DONT_DESTROY=""
# We check that by deleting the $1 pool, we're not
# going to pull the rug out from other pools. Do this
# by looking at all other pools, ensuring that they
# aren't built from files or zvols contained in this pool.
for pool in $(zpool list -H -o name)
do
ALTMOUNTPOOL=""
# This is a list of file vdevs in $pool whose paths lie
# under the $1 pool's filesystems
FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
awk '{print $1}')
# this is a list of the zvols that make up the pool
ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
| awk '{print $1}')
# also want to determine if it's a file-based pool using an
# alternate mountpoint...
POOL_FILE_DIRS=$(zpool status -v $pool | \
grep / | awk '{print $1}' | \
awk -F/ '{print $2}' | grep -v "dev")
for pooldir in $POOL_FILE_DIRS
do
OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
grep "${pooldir}$" | awk '{print $1}')
ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
done
if [ ! -z "$ZVOLPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ZVOLPOOL on $1"
fi
if [ ! -z "$FILEPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $FILEPOOL on $1"
fi
if [ ! -z "$ALTMOUNTPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
fi
done
if [ -z "${DONT_DESTROY}" ]
then
return 0
else
log_note "Warning: it is not safe to destroy $1!"
return 1
fi
}
#
# Get the available ZFS compression options
# $1 option type zfs_set|zfs_compress
#
function get_compress_opts
{
typeset COMPRESS_OPTS
typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
gzip-6 gzip-7 gzip-8 gzip-9"
if [[ $1 == "zfs_compress" ]] ; then
COMPRESS_OPTS="on lzjb"
elif [[ $1 == "zfs_set" ]] ; then
COMPRESS_OPTS="on off lzjb"
fi
typeset valid_opts="$COMPRESS_OPTS"
zfs get 2>&1 | grep gzip >/dev/null 2>&1
if [[ $? -eq 0 ]]; then
valid_opts="$valid_opts $GZIP_OPTS"
fi
echo "$valid_opts"
}
#
# Verify that the zfs operation with the -p option works as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
typeset ops=$1
typeset datatype=$2
typeset dataset=$3
typeset newdataset=$4
if [[ $datatype != "fs" && $datatype != "vol" ]]; then
log_fail "$datatype is not supported."
fi
# check parameters accordingly
case $ops in
create)
newdataset=$dataset
dataset=""
if [[ $datatype == "vol" ]]; then
ops="create -V $VOLSIZE"
fi
;;
clone)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
log_must snapexists $dataset
;;
rename)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
;;
*)
log_fail "$ops is not supported."
;;
esac
# make sure the upper level filesystem does not exist
destroy_dataset "${newdataset%/*}" "-rRf"
# without -p option, operation will fail
log_mustnot zfs $ops $dataset $newdataset
log_mustnot datasetexists $newdataset ${newdataset%/*}
# with -p option, operation should succeed
log_must zfs $ops -p $dataset $newdataset
block_device_wait
if ! datasetexists $newdataset ; then
log_fail "-p option does not work for $ops"
fi
# when $ops is create or clone, redoing the operation should still return zero
if [[ $ops != "rename" ]]; then
log_must zfs $ops -p $dataset $newdataset
fi
return 0
}
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
typeset pool=$1
typeset config=$2
typeset alt_root
if ! poolexists "$pool" ; then
return 1
fi
alt_root=$(zpool list -H $pool | awk '{print $NF}')
if [[ $alt_root == "-" ]]; then
value=$(zdb -C $pool | grep "$config:" | awk -F: \
'{print $2}')
else
value=$(zdb -e $pool | grep "$config:" | awk -F: \
'{print $2}')
fi
if [[ -n $value ]] ; then
value=${value#\'}
value=${value%\'}
fi
echo $value
return 0
}
#
# Private function. Randomly select one of the items from the arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
typeset cnt=$1
shift
typeset str="$@"
typeset -i ind
((ind = RANDOM % cnt + 1))
typeset ret=$(echo "$str" | cut -f $ind -d ' ')
echo $ret
}
#
# Randomly select one item from the arguments, including the NONE string
#
function random_get_with_non
{
typeset -i cnt=$#
((cnt += 1))
_random_get "$cnt" "$@"
}
#
# Randomly select one item from the arguments, excluding the NONE string
#
function random_get
{
_random_get "$#" "$@"
}
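#
# Example: pick one of three compression values at random:
#
#   typeset comp=$(random_get on off lzjb)
#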
#
# Detect if the current system supports slog
#
function verify_slog_support
{
typeset dir=$TEST_BASE_DIR/disk.$$
typeset pool=foo.$$
typeset vdev=$dir/a
typeset sdev=$dir/b
mkdir -p $dir
mkfile $MINVDEVSIZE $vdev $sdev
typeset -i ret=0
if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
ret=1
fi
rm -r $dir
return $ret
}
#
# The function will generate a dataset name of at least the specified length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
typeset -i len=$1
typeset basestr="$2"
typeset -i baselen=${#basestr}
typeset -i iter=0
typeset l_name=""
if ((len % baselen == 0)); then
((iter = len / baselen))
else
((iter = len / baselen + 1))
fi
while ((iter > 0)); do
l_name="${l_name}$basestr"
((iter -= 1))
done
echo $l_name
}
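#
# Example: build a name component of at least 200 characters by
# repeating the base string:
#
#   typeset long_name=$(gen_dataset_name 200 "abcdefg")
#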
#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
typeset cksum
sync
cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
| awk -F= '{print $7}')
echo $cksum
}
#
# Get cksum of file
# $1 file path
#
function checksum
{
typeset cksum
cksum=$(cksum $1 | awk '{print $1}')
echo $cksum
}
#
# Get the given disk/slice state from the specified field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset field=${3:-$pool}
state=$(zpool status -v "$pool" 2>/dev/null | \
nawk -v device=$disk -v pool=$pool -v field=$field \
'BEGIN {startconfig=0; startfield=0; }
/config:/ {startconfig=1}
(startconfig==1) && ($1==field) {startfield=1; next;}
(startfield==1) && ($1==device) {print $2; exit;}
(startfield==1) &&
($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
echo $state
}
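#
# get_device_state usage example (prints e.g. ONLINE, OFFLINE or FAULTED):
#	state=$(get_device_state $TESTPOOL $DISK "logs")
#	[[ $state == "ONLINE" ]] || log_fail "log device is $state"
#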
#
# Print the filesystem type of the given directory
#
# $1 directory name
#
function get_fstype
{
typeset dir=$1
if [[ -z $dir ]]; then
log_fail "Usage: get_fstype <directory>"
fi
#
# $ df -n /
# / : ufs
#
df -n $dir | awk '{print $3}'
}
#
# Given a disk, label it to VTOC regardless of what label was on the disk
# $1 disk
#
function labelvtoc
{
typeset disk=$1
if [[ -z $disk ]]; then
log_fail "The disk name is unspecified."
fi
typeset label_file=/var/tmp/labelvtoc.$$
typeset arch=$(uname -p)
if is_linux; then
log_note "Currently unsupported by the test framework"
return 1
fi
if [[ $arch == "i386" ]]; then
echo "label" > $label_file
echo "0" >> $label_file
echo "" >> $label_file
echo "q" >> $label_file
echo "q" >> $label_file
fdisk -B $disk >/dev/null 2>&1
	# wait a while for fdisk to finish
sleep 60
elif [[ $arch == "sparc" ]]; then
echo "label" > $label_file
echo "0" >> $label_file
echo "" >> $label_file
echo "" >> $label_file
echo "" >> $label_file
echo "q" >> $label_file
else
log_fail "unknown arch type"
fi
format -e -s -d $disk -f $label_file
typeset -i ret_val=$?
rm -f $label_file
#
# wait for format to finish
#
sleep 60
if ((ret_val != 0)); then
log_fail "unable to label $disk as VTOC."
fi
return 0
}
#
# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise false
#
function is_zfsroot
{
df -n / | grep zfs > /dev/null 2>&1
return $?
}
#
# get the root filesystem name if it's a zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
typeset rootfs=""
if ! is_linux; then
rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
/etc/mnttab)
fi
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootfs"
fi
zfs list $rootfs > /dev/null 2>&1
if (($? == 0)); then
echo $rootfs
else
log_fail "This is not a zfsroot system."
fi
}
#
# get the rootfs's pool name
# return:
# rootpool name
#
function get_rootpool
{
typeset rootfs=""
typeset rootpool=""
if ! is_linux; then
rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
/etc/mnttab)
fi
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootpool"
fi
zfs list $rootfs > /dev/null 2>&1
if (($? == 0)); then
		rootpool=$(echo $rootfs | awk -F/ '{print $1}')
echo $rootpool
else
log_fail "This is not a zfsroot system."
fi
}
#
# Get the package name
#
function get_package_name
{
typeset dirpath=${1:-$STC_NAME}
echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
}
#
# Get the number of words in a whitespace-separated string
#
function get_word_count
{
echo $1 | wc -w
}
#
# Verify that the required number of disks is given
#
function verify_disk_count
{
typeset -i min=${2:-1}
typeset -i count=$(get_word_count "$1")
if ((count < min)); then
log_untested "A minimum of $min disks is required to run." \
" You specified $count disk(s)"
fi
}
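#
# verify_disk_count usage example ($DISKS is supplied by the test framework):
#	verify_disk_count "$DISKS" 2
#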
function ds_is_volume
{
typeset type=$(get_prop type $1)
[[ $type = "volume" ]] && return 0
return 1
}
function ds_is_filesystem
{
typeset type=$(get_prop type $1)
[[ $type = "filesystem" ]] && return 0
return 1
}
function ds_is_snapshot
{
typeset type=$(get_prop type $1)
[[ $type = "snapshot" ]] && return 0
return 1
}
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
	svcs -H -o state labeld 2>/dev/null | grep "enabled" > /dev/null 2>&1
if (($? != 0)); then
return 1
else
return 0
fi
}
# Utility function to determine if a system has multiple cpus.
function is_mp
{
if is_linux; then
(($(nproc) > 1))
else
(($(psrinfo | wc -l) > 1))
fi
return $?
}
function get_cpu_freq
{
if is_linux; then
lscpu | awk '/CPU MHz/ { print $3 }'
else
psrinfo -v 0 | awk '/processor operates at/ {print $6}'
fi
}
# Run the given command as the user provided.
function user_run
{
typeset user=$1
shift
log_note "user:$user $@"
eval su - \$user -c \"$@\" > $TEST_BASE_DIR/out 2>$TEST_BASE_DIR/err
return $?
}
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
typeset pool=$1
typeset vdev
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 2
fi
shift
# We could use 'zpool list' to only get the vdevs of the pool but we
# can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
# therefore we use the 'zpool status' output.
typeset tmpfile=$(mktemp)
zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
	for vdev in $@; do
		if ! grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1; then
			rm -f $tmpfile
			return 1
		fi
	done
	rm -f $tmpfile
	return 0
}
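#
# vdevs_in_pool usage example:
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2
#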
function get_max
{
	typeset -i i max=$1
	shift
	for i in "$@"; do
		max=$((max > i ? max : i))
	done
	echo $max
}
function get_min
{
	typeset -i i min=$1
	shift
	for i in "$@"; do
		min=$((min < i ? min : i))
	done
	echo $min
}
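#
# get_max/get_min usage example ($size1..$size3 are illustrative):
#	typeset largest=$(get_max $size1 $size2 $size3)
#	typeset smallest=$(get_min $size1 $size2 $size3)
#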
#
# Generate a random number between 1 and the argument.
#
function random
{
typeset max=$1
echo $(( ($RANDOM % $max) + 1 ))
}
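#
# random usage example (prints a value between 1 and 10 inclusive):
#	typeset -i delay=$(random 10)
#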
# Write data that can be compressed into a directory
function write_compressible
{
typeset dir=$1
typeset megs=$2
typeset nfiles=${3:-1}
typeset bs=${4:-1024k}
typeset fname=${5:-file}
[[ -d $dir ]] || log_fail "No directory: $dir"
# Under Linux fio is not currently used since its behavior can
# differ significantly across versions. This includes missing
# command line options and cases where the --buffer_compress_*
# options fail to behave as expected.
if is_linux; then
typeset file_bytes=$(to_bytes $megs)
typeset bs_bytes=4096
typeset blocks=$(($file_bytes / $bs_bytes))
for (( i = 0; i < $nfiles; i++ )); do
truncate -s $file_bytes $dir/$fname.$i
# Write every third block to get 66% compression.
for (( j = 0; j < $blocks; j += 3 )); do
dd if=/dev/urandom of=$dir/$fname.$i \
seek=$j bs=$bs_bytes count=1 \
conv=notrunc >/dev/null 2>&1
done
done
else
log_must eval "fio \
--name=job \
--fallocate=0 \
--minimal \
--randrepeat=0 \
--buffer_compress_percentage=66 \
--buffer_compress_chunk=4096 \
--directory=$dir \
--numjobs=$nfiles \
--nrfiles=$nfiles \
--rw=write \
--bs=$bs \
--filesize=$megs \
--filename_format='$fname.\$jobnum' >/dev/null"
fi
}
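#
# write_compressible usage example (one 10m file named "file.0" in $TESTDIR):
#	write_compressible $TESTDIR 10m
#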
function get_objnum
{
typeset pathname=$1
typeset objnum
[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
objnum=$(stat -c %i $pathname)
echo $objnum
}
#
# Sync data to the pool
#
# $1 pool name
# $2 boolean to force uberblock (and config including zpool cache file) update
#
function sync_pool #pool <force>
{
typeset pool=${1:-$TESTPOOL}
typeset force=${2:-false}
if [[ $force == true ]]; then
log_must zpool sync -f $pool
else
log_must zpool sync $pool
fi
return 0
}
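#
# sync_pool usage example (force an uberblock update as well):
#	sync_pool $TESTPOOL true
#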
#
# Wait for the zpool 'freeing' property to drop to zero.
#
# $1 pool name
#
function wait_freeing #pool
{
typeset pool=${1:-$TESTPOOL}
while true; do
[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
log_must sleep 1
done
}
#
# Wait for every device replace operation to complete
#
# $1 pool name
#
function wait_replacing #pool
{
typeset pool=${1:-$TESTPOOL}
while true; do
[[ "" == "$(zpool status $pool |
awk '/replacing-[0-9]+/ {print $1}')" ]] && break
log_must sleep 1
done
}
#
# Wait for a pool to be scrubbed
#
# $1 pool name
#
# Blocks until the pool has been scrubbed; there is no timeout and no
# check that a scrub was actually started.
#
function wait_scrubbed
{
typeset pool=${1:-$TESTPOOL}
while true ; do
is_pool_scrubbed $pool && break
log_must sleep 1
done
}
# Back up zed.rc to our test directory so that we can edit it for our test.
#
# Returns: Backup file name. You will need to pass this to zed_rc_restore().
function zed_rc_backup
{
zedrc_backup="$(mktemp)"
cp $ZEDLET_DIR/zed.rc $zedrc_backup
echo $zedrc_backup
}
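#
# Restore zed.rc from a backup copy.
#
# $1 backup file name, as returned by zed_rc_backup()
#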
function zed_rc_restore
{
mv $1 $ZEDLET_DIR/zed.rc
}
#
# Setup custom environment for the ZED.
#
# $@ Optional list of zedlets to run under zed.
function zed_setup
{
if ! is_linux; then
return
fi
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
if [[ ! -e $VDEVID_CONF ]]; then
log_must touch $VDEVID_CONF
fi
if [[ -e $VDEVID_CONF_ETC ]]; then
log_fail "Must not have $VDEVID_CONF_ETC file present on system"
fi
EXTRA_ZEDLETS=$@
# Create a symlink for /etc/zfs/vdev_id.conf file.
log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
# Setup minimal ZED configuration. Individual test cases should
# add additional ZEDLETs as needed for their specific test.
log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
# Scripts must only be user writable.
if [[ -n "$EXTRA_ZEDLETS" ]] ; then
saved_umask=$(umask)
log_must umask 0022
for i in $EXTRA_ZEDLETS ; do
log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
done
log_must umask $saved_umask
fi
# Customize the zed.rc file to enable the full debug log.
log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
}
#
# Cleanup custom ZED environment.
#
# $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
{
if ! is_linux; then
return
fi
EXTRA_ZEDLETS=$@
log_must rm -f ${ZEDLET_DIR}/zed.rc
log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
log_must rm -f ${ZEDLET_DIR}/all-debug.sh
log_must rm -f ${ZEDLET_DIR}/state
if [[ -n "$EXTRA_ZEDLETS" ]] ; then
for i in $EXTRA_ZEDLETS ; do
log_must rm -f ${ZEDLET_DIR}/$i
done
fi
log_must rm -f $ZED_LOG
log_must rm -f $ZED_DEBUG_LOG
log_must rm -f $VDEVID_CONF_ETC
log_must rm -f $VDEVID_CONF
rmdir $ZEDLET_DIR
}
#
# Check if ZED is currently running, if not start ZED.
#
function zed_start
{
if ! is_linux; then
return
fi
# ZEDLET_DIR=/var/tmp/zed
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
# Verify the ZED is not already running.
pgrep -x zed > /dev/null
if (($? == 0)); then
log_fail "ZED already running"
fi
log_note "Starting ZED"
# run ZED in the background and redirect foreground logging
# output to $ZED_LOG.
log_must truncate -s 0 $ZED_DEBUG_LOG
log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
"-s $ZEDLET_DIR/state 2>$ZED_LOG &"
return 0
}
#
# Kill ZED process
#
function zed_stop
{
if ! is_linux; then
return
fi
log_note "Stopping ZED"
if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
zedpid=$(<${ZEDLET_DIR}/zed.pid)
kill $zedpid
while ps -p $zedpid > /dev/null; do
sleep 1
done
rm -f ${ZEDLET_DIR}/zed.pid
fi
return 0
}
#
# Drain all zevents
#
function zed_events_drain
{
while [ $(zpool events -H | wc -l) -ne 0 ]; do
sleep 1
zpool events -c >/dev/null
done
}
# Set a variable in zed.rc to the given value, replacing any existing
# (possibly commented-out) assignment in the process.
#
# $1 variable
# $2 value
function zed_rc_set
{
var="$1"
val="$2"
	# Remove any existing (possibly commented-out) assignment
	sed -i "/$var/d" $ZEDLET_DIR/zed.rc
# Add it at the end
echo "$var=$val" >> $ZEDLET_DIR/zed.rc
}
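#
# zed_rc_set usage example (ZED_NOTIFY_INTERVAL_SECS is a standard zed.rc
# variable):
#	zed_rc_set ZED_NOTIFY_INTERVAL_SECS 3600
#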
#
# Check if the provided device is actively being used as a swap device.
#
function is_swap_inuse
{
typeset device=$1
if [[ -z $device ]] ; then
log_note "No device specified."
return 1
fi
if is_linux; then
swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
else
swap -l | grep -w $device > /dev/null 2>&1
fi
return $?
}
#
# Setup a swap device using the provided device.
#
function swap_setup
{
typeset swapdev=$1
if is_linux; then
log_must eval "mkswap $swapdev > /dev/null 2>&1"
log_must swapon $swapdev
else
log_must swap -a $swapdev
fi
return 0
}
#
# Cleanup a swap device on the provided device.
#
function swap_cleanup
{
typeset swapdev=$1
if is_swap_inuse $swapdev; then
if is_linux; then
log_must swapoff $swapdev
else
log_must swap -d $swapdev
fi
fi
return 0
}
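#
# swap_setup/swap_cleanup usage example (a zvol device node is illustrative):
#	swap_setup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL
#	swap_cleanup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL
#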
#
# Set a global system tunable (64-bit value)
#
# $1 tunable name
# $2 tunable value
#
function set_tunable64
{
set_tunable_impl "$1" "$2" Z
}
#
# Set a global system tunable (32-bit value)
#
# $1 tunable name
# $2 tunable value
#
function set_tunable32
{
set_tunable_impl "$1" "$2" W
}
function set_tunable_impl
{
typeset tunable="$1"
typeset value="$2"
typeset mdb_cmd="$3"
typeset module="${4:-zfs}"
[[ -z "$tunable" ]] && return 1
[[ -z "$value" ]] && return 1
[[ -z "$mdb_cmd" ]] && return 1
case "$(uname)" in
Linux)
typeset zfs_tunables="/sys/module/$module/parameters"
[[ -w "$zfs_tunables/$tunable" ]] || return 1
echo -n "$value" > "$zfs_tunables/$tunable"
return "$?"
;;
SunOS)
[[ "$module" -eq "zfs" ]] || return 1
echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
return "$?"
;;
esac
}
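#
# set_tunable64 usage example (zfs_deadman_ziotime_ms is a Linux module
# parameter):
#	log_must set_tunable64 zfs_deadman_ziotime_ms 300000
#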
#
# Get a global system tunable
#
# $1 tunable name
#
function get_tunable
{
get_tunable_impl "$1"
}
function get_tunable_impl
{
typeset tunable="$1"
typeset module="${2:-zfs}"
[[ -z "$tunable" ]] && return 1
case "$(uname)" in
Linux)
typeset zfs_tunables="/sys/module/$module/parameters"
[[ -f "$zfs_tunables/$tunable" ]] || return 1
cat $zfs_tunables/$tunable
return "$?"
;;
SunOS)
[[ "$module" -eq "zfs" ]] || return 1
;;
esac
return 1
}
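#
# get_tunable usage example (zfs_txg_timeout is a Linux module parameter):
#	typeset saved=$(get_tunable zfs_txg_timeout)
#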
#
# Prints the current time in seconds since UNIX Epoch.
#
function current_epoch
{
printf '%(%s)T'
}
#
# Get decimal value of global uint32_t variable using mdb.
#
function mdb_get_uint32
{
typeset variable=$1
typeset value
value=$(mdb -k -e "$variable/X | ::eval .=U")
if [[ $? -ne 0 ]]; then
log_fail "Failed to get value of '$variable' from mdb."
return 1
fi
echo $value
return 0
}
#
# Set global uint32_t variable to a decimal value using mdb.
#
function mdb_set_uint32
{
typeset variable=$1
typeset value=$2
mdb -kw -e "$variable/W 0t$value" > /dev/null
if [[ $? -ne 0 ]]; then
echo "Failed to set '$variable' to '$value' in mdb."
return 1
fi
return 0
}
#
# Set global scalar integer variable to a hex value using mdb.
# Note: Target should have CTF data loaded.
#
function mdb_ctf_set_int
{
typeset variable=$1
typeset value=$2
mdb -kw -e "$variable/z $value" > /dev/null
if [[ $? -ne 0 ]]; then
echo "Failed to set '$variable' to '$value' in mdb."
return 1
fi
return 0
}