drone-kaniko/kaniko.go
surtur a34e88943a
fix: temporary patch for kaniko on cgroup v2 hosts
This is a temporary workaround that enables kaniko to run on cgroup v2
hosts. Due to an upstream issue, kaniko fails to detect that it is, in
fact, being run inside a container on such hosts.
The solution introduced here is to force kaniko to run regardless.

ref: https://github.com/GoogleContainerTools/kaniko/issues/1592
2021-05-02 01:37:56 +02:00
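
As a possible future refinement (not part of this patch, which simply always
passes --force), the workaround could be scoped to cgroup v2 hosts only, e.g.
by probing for /sys/fs/cgroup/cgroup.controllers, a file that only exists on
the unified (v2) hierarchy. A minimal sketch of such a hypothetical helper,
which would live alongside Exec in this file (the "os" import is already
present there):

// onCgroupV2 reports whether the host uses the unified cgroup v2 hierarchy,
// which exposes /sys/fs/cgroup/cgroup.controllers. Hypothetical helper, not
// part of this patch.
func onCgroupV2() bool {
	_, err := os.Stat("/sys/fs/cgroup/cgroup.controllers")
	return err == nil
}

// usage inside Exec, instead of unconditionally appending the flag:
// if onCgroupV2() {
// 	cmdArgs = append(cmdArgs, "--force")
// }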


package kaniko

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"strings"

	"git.dotya.ml/wanderer/drone-kaniko/cmd/artifact"
)
type (
	// Build defines Docker build parameters.
	Build struct {
		Dockerfile    string   // Docker build Dockerfile
		Context       string   // Docker build context
		Tags          []string // Docker build tags
		Args          []string // Docker build args
		Target        string   // Docker build target
		Repo          string   // Docker build repository
		Labels        []string // Label map
		SkipTlsVerify bool     // Docker skip tls certificate verify for registry
		SnapshotMode  string   // Kaniko snapshot mode
		EnableCache   bool     // Whether to enable kaniko cache
		CacheRepo     string   // Remote repository that will be used to store cached layers
		CacheTTL      int      // Cache timeout in hours
		DigestFile    string   // Digest file location
		NoPush        bool     // Set this flag if you only want to build the image, without pushing to a registry
	}
	// Artifact defines the content of the artifact file.
	Artifact struct {
		Tags         []string                  // Docker artifact tags
		Repo         string                    // Docker artifact repository
		Registry     string                    // Docker artifact registry
		RegistryType artifact.RegistryTypeEnum // Docker artifact registry type
		ArtifactFile string                    // Artifact file location
	}
	// Plugin defines the Docker plugin parameters.
	Plugin struct {
		Build    Build    // Docker build configuration
		Artifact Artifact // Artifact file content
	}
)
// Exec executes the plugin step.
func (p Plugin) Exec() error {
	if _, err := os.Stat(p.Build.Dockerfile); os.IsNotExist(err) {
		return fmt.Errorf("dockerfile does not exist at path: %s", p.Build.Dockerfile)
	}

	cmdArgs := []string{
		fmt.Sprintf("--dockerfile=%s", p.Build.Dockerfile),
		fmt.Sprintf("--context=dir://%s", p.Build.Context),
	}
	if p.Build.Repo == "" {
		fmt.Println("repository name to publish image not specified, adding '--no-push' flag")
		cmdArgs = append(cmdArgs, "--no-push")
	} else {
		// Set the destination repository, once per tag.
		for _, tag := range p.Build.Tags {
			cmdArgs = append(cmdArgs, fmt.Sprintf("--destination=%s:%s", p.Build.Repo, tag))
		}
	}
	// Set the build arguments.
	for _, arg := range p.Build.Args {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--build-arg=%s", arg))
	}

	// Set the labels.
	for _, label := range p.Build.Labels {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--label=%s", label))
	}

	if p.Build.Target != "" {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--target=%s", p.Build.Target))
	}
	if p.Build.SkipTlsVerify {
		cmdArgs = append(cmdArgs, "--skip-tls-verify=true")
	}
	if p.Build.SnapshotMode != "" {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--snapshotMode=%s", p.Build.SnapshotMode))
	}
	if p.Build.EnableCache {
		cmdArgs = append(cmdArgs, "--cache=true")
	}
	if p.Build.CacheRepo != "" {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--cache-repo=%s", p.Build.CacheRepo))
	}
	if p.Build.CacheTTL != 0 {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--cache-ttl=%d", p.Build.CacheTTL))
	}
	if p.Build.DigestFile != "" {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--digest-file=%s", p.Build.DigestFile))
	}
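	// Honour NoPush, but avoid appending '--no-push' a second time if it
	// was already added above (when no repository was specified).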
	if p.Build.NoPush {
		pushNeeded := true
		for _, arg := range cmdArgs {
			if arg == "--no-push" {
				pushNeeded = false
				break
			}
		}
		if pushNeeded {
			cmdArgs = append(cmdArgs, "--no-push")
		}
	}
	// Due to kaniko spontaneously failing to detect that it is being run in a
	// container, it is probably for the better to always force-run here until
	// this is resolved and fixed upstream. Since all of our runners are on
	// cgroup v2 hosts, this issue is quite impactful for us.
	// ref: https://github.com/GoogleContainerTools/kaniko/issues/1592
	//
	// example error message: '/kaniko/executor --dockerfile=Dockerfile --context=dir://.
	// --no-push kaniko should only be run inside of a container, run with the --force
	// flag if you are sure you want to continue'
	// another ref: https://drone.dotya.ml/wanderer/docker-archlinux-cdev/5960/2/2
	cmdArgs = append(cmdArgs, "--force")
	cmd := exec.Command("/kaniko/executor", cmdArgs...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	trace(cmd)

	if err := cmd.Run(); err != nil {
		return err
	}
	if p.Build.DigestFile != "" && p.Artifact.ArtifactFile != "" {
		content, err := ioutil.ReadFile(p.Build.DigestFile)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to read digest file contents at path: %s with error: %s\n", p.Build.DigestFile, err)
		}

		err = artifact.WritePluginArtifactFile(p.Artifact.RegistryType, p.Artifact.ArtifactFile, p.Artifact.Registry, p.Artifact.Repo, string(content), p.Artifact.Tags)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to write plugin artifact file at path: %s with error: %s\n", p.Artifact.ArtifactFile, err)
		}
	}

	return nil
}
// trace writes each command to stdout, prefixed with '+', so that it can be
// extracted and displayed in the logs.
func trace(cmd *exec.Cmd) {
	fmt.Fprintf(os.Stdout, "+ %s\n", strings.Join(cmd.Args, " "))
}
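
For illustration, a minimal sketch of how a caller might wire this plugin up,
assuming the module path matches the internal import seen above; in the real
plugin the fields are populated from the step's settings (environment
variables), which is not shown here, and the repository/tag values below are
hypothetical:

package main

import (
	"log"

	kaniko "git.dotya.ml/wanderer/drone-kaniko"
)

func main() {
	p := kaniko.Plugin{
		Build: kaniko.Build{
			Dockerfile: "Dockerfile",
			Context:    ".",
			Repo:       "registry.example.com/wanderer/app", // hypothetical
			Tags:       []string{"latest"},
		},
	}

	// Exec builds the kaniko argument list and runs /kaniko/executor.
	if err := p.Exec(); err != nil {
		log.Fatal(err)
	}
}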