
Cirrus: Use get_ci_vm container image

This replaces the duplicated script with a small frontend that uses a
common container image to do most of the work.

Signed-off-by: Chris Evich <cevich@redhat.com>
Chris Evich 2021-04-14 10:39:22 -04:00 committed by Lukas Vrabec
parent a828069ff3
commit dc00aef98c
3 changed files with 77 additions and 244 deletions
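The new hack/get_ci_vm.sh is a thin wrapper: it mounts the repository into the
shared quay.io/libpod/get_ci_vm container, which does the actual VM creation
and provisioning. A minimal usage sketch, assuming local podman access and
prior authorization for the GCP project as the script's header requires (the
task name is one of the two generated by the new .cirrus.yml matrix below):

$ hack/get_ci_vm.sh                                   # no arguments: prints help
$ hack/get_ci_vm.sh "Test podman on fedora-34beta"    # create and connect to a VM for that task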

.cirrus.yml

@@ -18,12 +18,13 @@ env:
####
#### Cache-image names to test with
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
_BUILT_IMAGE_SUFFIX: "c5032481331085312"
FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}"
FEDORA_NAME: "fedora-34beta"
PRIOR_FEDORA_NAME: "fedora-33"
# Google-cloud VM Images
IMAGE_SUFFIX: "c5032481331085312"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
####
#### Command variables to help avoid duplication
@@ -43,7 +44,7 @@ timeout_in: 120m
# Default VM to use unless set or modified by task
gce_instance:
image_project: "${IMAGE_PROJECT}"
image_project: "libpod-218412"
zone: "us-central1-c" # Required by Cirrus for the time being
cpu: 2
memory: "4Gb"
@@ -53,11 +54,15 @@ gce_instance:
# Each 'task' runs in parallel, '_task' suffix required on name.
test_upstream_podman_task:
alias: test_upstream_podman
gce_instance: # Only need to specify differences from defaults (above)
matrix: # Duplicate this task for each matrix product.
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
matrix:
- name: "Test podman on ${FEDORA_NAME}"
gce_instance:
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
- name: "Test podman on ${PRIOR_FEDORA_NAME}"
gce_instance:
image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
env:
# Which branch, tag, or sha of podman repository to test against
@@ -78,7 +83,7 @@ test_upstream_podman_task:
meta_task:
container:
image: "quay.io/libpod/imgts:${_BUILT_IMAGE_SUFFIX}"
image: "quay.io/libpod/imgts:latest"
cpu: 1
memory: 1
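Note the variable rename: image names now interpolate ${IMAGE_SUFFIX}, so the
two cache images resolve to fedora-c5032481331085312 and
prior-fedora-c5032481331085312 in the libpod-218412 GCE project. To confirm
such an image exists, assuming an authorized gcloud setup, one could run:

$ gcloud compute images describe fedora-c5032481331085312 --project=libpod-218412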

contrib/cirrus/setup.sh

@@ -8,7 +8,10 @@ show_env_vars
case "${OS_RELEASE_ID}" in
fedora)
echo "Installing necessary additional packages"
msg "Expanding root disk space"
growpart /dev/sda 1
resize2fs /dev/sda1
msg "Installing necessary additional packages"
ooe.sh dnf install -y \
python3 \
setools-console \
@@ -20,5 +23,3 @@ esac
echo "Configuring git for access to podman pull-requests"
NEWREF='+refs/pull/*/head:refs/remotes/upstream/pr/*'
git config --global --replace-all remote.origin.fetch "$NEWREF"
# helpful when debugging w/ hack/get_ci_vm.sh
git config --global --replace-all remote.upstream.fetch "$NEWREF"
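The new setup step grows partition 1 of /dev/sda to fill the boot disk
(growpart, from cloud-utils) and then resizes the ext filesystem on it
(resize2fs); this assumes the CI image keeps its root filesystem on /dev/sda1,
as the hard-coded device names imply. A quick, purely illustrative sanity
check after setup:

$ df -h /    # root filesystem should now report roughly the full boot-disk size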

hack/get_ci_vm.sh

@@ -1,237 +1,64 @@
#!/bin/bash
#!/usr/bin/env bash
#
# For help and usage information, simply execute the script w/o any arguments.
#
# This script is intended to be run by Red Hat udica developers who need
# to debug problems specifically related to Cirrus-CI automated testing.
# It requires that you have been granted prior access to create VMs in
# google-cloud. For non-Red Hat contributors, VMs are available as-needed,
# with supervision upon request.
set -e
RED="\e[1;36;41m"
YEL="\e[1;33;44m"
NOR="\e[0m"
USAGE_WARNING="
${YEL}WARNING: This will not work without local sudo access to run podman,${NOR}
${YEL}and prior authorization to use the udica GCP project. Also,${NOR}
${YEL}possession of the proper ssh private key is required.${NOR}
"
# TODO: Many/most of these values should come from .cirrus.yml
ZONE="${ZONE:-us-central1-c}"
CPUS="2"
MEMORY="4Gb"
DISK="200"
PROJECT="udica-247612"
SRC="/tmp/udica"
GCLOUD_IMAGE=${GCLOUD_IMAGE:-quay.io/cevich/gcloud_centos:latest}
GCLOUD_SUDO=${GCLOUD_SUDO-sudo}
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
# Shared tmp directory between container and us
TMPDIR=$(mktemp -d --tmpdir $(basename $0)_tmpdir_XXXXXX)
UDICAROOT=$(realpath "$(dirname $0)/../") # on client
# else: Assume $PWD is the root of the udica repository
[[ "$UDICAROOT" != "/" ]] || UDICAROOT=$PWD
SRCDIR=/tmp/udica # on remote VM
# Command shortcuts save some typing (assumes $UDICAROOT is a subdir of $HOME)
SSHUSER="root"
PGCLOUD="$GCLOUD_SUDO podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER --security-opt label=disable -v $TMPDIR:$HOME -v $HOME/.config/gcloud:$HOME/.config/gcloud -v $HOME/.config/gcloud/ssh:$HOME/.ssh -v $UDICAROOT:$UDICAROOT $GCLOUD_IMAGE --configuration=udica --project=$PROJECT"
SCP_CMD="$PGCLOUD compute scp"
showrun() {
if [[ "$1" == "--background" ]]
then
shift
# Properly escape any nested spaces, so command can be copy-pasted
echo '+ '$(printf " %q" "$@")' &' > /dev/stderr
"$@" &
echo -e "${RED}<backgrounded>${NOR}"
else
echo '+ '$(printf " %q" "$@") > /dev/stderr
"$@"
# Help detect if we were called by get_ci_vm container
GET_CI_VM="${GET_CI_VM:-0}"
in_get_ci_vm() {
if ((GET_CI_VM==0)); then
echo "Error: $1 is not intended for use in this context"
exit 2
fi
}
cleanup() {
RET=$?
set +e
wait
# set GCLOUD_DEBUG to leave tmpdir behind for postmortem
test -z "$GCLOUD_DEBUG" && rm -rf $TMPDIR
# Not always called from an exit handler, but should always exit when called
exit $RET
}
trap cleanup EXIT
delvm() {
echo -e "\n"
echo -e "\n${YEL}Offering to Delete $VMNAME ${RED}(Might take a minute or two)${NOR}"
echo -e "\n${YEL}Note: It's safe to answer N, then re-run script again later.${NOR}"
showrun $CLEANUP_CMD # prompts for Yes/No
cleanup
}
image_hints() {
_BIS=$(egrep -m 1 '_BUILT_IMAGE_SUFFIX:[[:space:]]+"[[:print:]]+"' \
"$UDICAROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]')
egrep '[[:space:]]+[[:alnum:]].+_CACHE_IMAGE_NAME:[[:space:]]+"[[:print:]]+"' \
"$UDICAROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]' | \
sed -r -e "s/\\\$[{]_BUILT_IMAGE_SUFFIX[}]/$_BIS/" | sort -u
}
show_usage() {
echo -e "\n${RED}ERROR: $1${NOR}"
echo -e "${YEL}Usage: $(basename $0) [-m <SPECIALMODE>] [-u <ROOTLESS_USER> ] <image_name>${NOR}"
echo "Use -m <SPECIALMODE> with a supported value documented in contrib/cirrus/README.md."
echo "With '-m rootless' must also specify -u <ROOTLESS_USER> with name of user to create & use"
echo ""
if [[ -r ".cirrus.yml" ]]
then
echo -e "${YEL}Some possible image_name values (from .cirrus.yml):${NOR}"
image_hints
echo ""
fi
exit 1
}
get_env_vars() {
python -c '
import yaml
env=yaml.load(open(".cirrus.yml"), Loader=yaml.SafeLoader)["env"]
keys=[k for k in env if "ENCRYPTED" not in str(env[k])]
for k,v in env.items():
v=str(v)
if "ENCRYPTED" not in v:
print "{0}=\"{1}\"".format(k, v),
'
}
parse_args(){
echo -e "$USAGE_WARNING"
if [[ "$USER" =~ "root" ]]
then
show_usage "This script must be run as a regular user."
fi
ENVS="$(get_env_vars)"
[[ "$#" -ge "1" ]] || \
show_usage "Must specify at least one command-line parameter."
IMAGE_NAME="$1"
if [[ -z "$IMAGE_NAME" ]]
then
show_usage "No image-name specified."
fi
SETUP_CMD="$SRCDIR/contrib/cirrus/setup.sh"
BUILD_CMD="$SRCDIR/contrib/cirrus/build.sh"
VMNAME="${VMNAME:-${USER}-${IMAGE_NAME}}"
CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image-project=libpod-218412 --image=${IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $VMNAME"
SSH_CMD="$PGCLOUD compute ssh $SSHUSER@$VMNAME"
CLEANUP_CMD="$PGCLOUD compute instances delete --zone $ZONE --delete-disks=all $VMNAME"
}
##### main
[[ "${UDICAROOT%%${UDICAROOT##$HOME}}" == "$HOME" ]] || \
show_usage "Repo clone must be sub-dir of $HOME"
cd "$UDICAROOT"
parse_args "$@"
# Ensure mount-points and data directories exist on host as $USER. Also prevents
# permission-denied errors during cleanup() b/c `sudo podman` created mount-points
# owned by root.
mkdir -p $TMPDIR/${UDICAROOT##$HOME}
mkdir -p $TMPDIR/.ssh
mkdir -p {$HOME,$TMPDIR}/.config/gcloud/ssh
chmod 700 {$HOME,$TMPDIR}/.config/gcloud/ssh $TMPDIR/.ssh
cd $UDICAROOT
# Attempt to determine if named 'udica' gcloud configuration exists
showrun $PGCLOUD info > $TMPDIR/gcloud-info
if egrep -q "Account:.*None" $TMPDIR/gcloud-info
then
echo -e "\n${YEL}WARNING: Can't find gcloud configuration for udica, running init.${NOR}"
echo -e " ${RED}Please choose "#1: Re-initialize" and "login" if asked.${NOR}"
showrun $PGCLOUD init --project=$PROJECT --console-only --skip-diagnostics
# Verify it worked (account name == someone@example.com)
$PGCLOUD info > $TMPDIR/gcloud-info-after-init
if egrep -q "Account:.*None" $TMPDIR/gcloud-info-after-init
then
echo -e "${RED}ERROR: Could not initialize udica configuration in gcloud.${NOR}"
exit 5
fi
# If this is the only config, make it the default to avoid persistent warnings from gcloud
[[ -r "$HOME/.config/gcloud/configurations/config_default" ]] || \
ln "$HOME/.config/gcloud/configurations/config_udica" \
"$HOME/.config/gcloud/configurations/config_default"
# get_ci_vm APIv1 container entrypoint calls into this script
# to obtain required repo.-specific configuration options.
if [[ "$1" == "--config" ]]; then
in_get_ci_vm "$1"
cat <<EOF
DESTDIR="/tmp/udica"
UPSTREAM_REPO="https://github.com/containers/udica.git"
GCLOUD_PROJECT="udica-247612"
GCLOUD_IMGPROJECT="libpod-218412"
GCLOUD_CFG="udica"
GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-c}"
GCLOUD_CPUS="2"
GCLOUD_MEMORY="4Gb"
GCLOUD_DISK="200"
EOF
elif [[ "$1" == "--setup" ]]; then
in_get_ci_vm "$1"
# get_ci_vm container entrypoint calls us with this option on the
# Cirrus-CI environment instance, to perform repo.-specific setup.
cd $REPO_DIRPATH
echo "+ Loading ./contrib/cirrus/lib.sh" > /dev/stderr
source ./contrib/cirrus/lib.sh
echo "+ Running environment setup" > /dev/stderr
./contrib/cirrus/setup.sh
else
# Create and access VM for specified Cirrus-CI task
mkdir -p $HOME/.config/gcloud/ssh
podman run -it --rm \
--tz=local \
-e NAME="$USER" \
-e SRCDIR=/src \
-e GCLOUD_ZONE="$GCLOUD_ZONE" \
-e DEBUG="${DEBUG:-0}" \
-v $REPO_DIRPATH:/src:O \
-v $HOME/.config/gcloud:/root/.config/gcloud:z \
-v $HOME/.config/gcloud/ssh:/root/.ssh:z \
quay.io/libpod/get_ci_vm:latest "$@"
fi
# Couldn't make rsync work with gcloud's ssh wrapper because ssh-keys are generated on the fly
TARBALL=$VMNAME.tar.bz2
echo -e "\n${YEL}Packing up local repository into a tarball.${NOR}"
showrun --background tar cjf $TMPDIR/$TARBALL --warning=no-file-changed --exclude-vcs-ignores -C $UDICAROOT .
trap delvm INT # Allow deleting VM if CTRL-C during create
# This fails if VM already exists: permit this usage to re-init
echo -e "\n${YEL}Trying to creating a VM named $VMNAME${NOR}\n${YEL}in GCE region/zone $ZONE${NOR}"
echo -e "For faster access, export ZONE='something-closer-<any letter>'"
echo 'List of regions and zones: https://cloud.google.com/compute/docs/regions-zones/'
echo -e "${RED}(might take a minute/two. Errors ignored).${NOR}"
showrun $CREATE_CMD || true # allow re-running commands below when "delete: N"
# Any subsequent failure should prompt for VM deletion
trap - INT
trap delvm EXIT
echo -e "\n${YEL}Waiting up to 30s for ssh port to open${NOR}"
trap 'COUNT=9999' INT
ATTEMPTS=10
for (( COUNT=1 ; COUNT <= $ATTEMPTS ; COUNT++ ))
do
if $SSH_CMD --command "true"; then break; else sleep 3s; fi
done
if (( COUNT > $ATTEMPTS ))
then
echo -e "\n${RED}Failed${NOR}"
exit 7
fi
echo -e "${YEL}Got it${NOR}"
echo -e "\n${YEL}Removing and re-creating $SRCDIR on $VMNAME.${NOR}"
showrun $SSH_CMD --command "rm -rf $SRCDIR"
showrun $SSH_CMD --command "mkdir -p $SRCDIR"
echo -e "\n${YEL}Transfering tarball to $VMNAME.${NOR}"
wait
showrun $SCP_CMD $HOME/$TARBALL $SSHUSER@$VMNAME:/tmp/$TARBALL
echo -e "\n${YEL}Unpacking tarball into $SRCDIR on $VMNAME.${NOR}"
showrun $SSH_CMD --command "tar xjf /tmp/$TARBALL -C $SRCDIR"
echo -e "\n${YEL}Removing tarball on $VMNAME.${NOR}"
showrun $SSH_CMD --command "rm -f /tmp/$TARBALL"
echo -e "\n${YEL}Executing environment setup${NOR}"
showrun $SSH_CMD --command "$SETUP_CMD"
echo -e "\n${YEL}Executing podman build${NOR}"
showrun $SSH_CMD --command "$BUILD_CMD"
VMIP=$($PGCLOUD compute instances describe $VMNAME --format='get(networkInterfaces[0].accessConfigs[0].natIP)')
echo -e "\n${YEL}Connecting to $VMNAME${NOR}\nPublic IP Address: $VMIP\n${RED}(option to delete VM upon logout).${NOR}\n"
if [[ -n "$ROOTLESS_USER" ]]
then
echo "Re-chowning source files after transfer"
showrun $SSH_CMD --command "chown -R $ROOTLESS_USER $SRCDIR"
echo "Connecting as user $ROOTLESS_USER"
SSH_CMD="$PGCLOUD compute ssh $ROOTLESS_USER@$VMNAME"
fi
showrun $SSH_CMD -- -t "cd $SRCDIR && exec env $ENVS bash -il"
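
For reference, the get_ci_vm APIv1 flow the new script plugs into: the
container entrypoint calls back into the repository's script with --config to
read the settings above, creates the VM, then invokes the script on the VM
with --setup. A hypothetical sketch of that round trip (entrypoint internals
and $VMNAME assumed for illustration, not taken from the container's actual
source):

# Illustrative only -- approximates what the get_ci_vm container entrypoint does.
eval "$(GET_CI_VM=1 /src/hack/get_ci_vm.sh --config)"     # import DESTDIR, GCLOUD_*, etc.
gcloud compute instances create "$VMNAME" --project="$GCLOUD_PROJECT" --zone="$GCLOUD_ZONE"
gcloud compute ssh "root@$VMNAME" -- "GET_CI_VM=1 $DESTDIR/hack/get_ci_vm.sh --setup"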