kaniko-job-test/main.go
2023-08-26 13:32:44 +08:00

321 lines
8.6 KiB
Go

package main
import (
"context"
"encoding/json"
"fmt"
// "go/types"
"os"
apiv1 "k8s.io/api/core/v1"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/hashicorp/terraform-plugin-log/tflog"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/utils/pointer"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/seal-io/terraform-provider-kaniko/utils"
apibatchv1 "k8s.io/api/batch/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
defaultNamespace = "default"
kanikoImage = "gcr.io/kaniko-project/executor:v1.5.1"
inClusterNamespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
)
type DockerConfigJSON struct {
Auths map[string]authn.AuthConfig
}
type imageResourceModel struct {
ID types.String `tfsdk:"id"`
GitUsername types.String `tfsdk:"git_username"`
GitPassword types.String `tfsdk:"git_password"`
Context types.String `tfsdk:"context"`
Dockerfile types.String `tfsdk:"dockerfile"`
Destination types.String `tfsdk:"destination"`
BuildArg types.Map `tfsdk:"build_arg"`
RegistryUsername types.String `tfsdk:"registry_username"`
RegistryPassword types.String `tfsdk:"registry_password"`
Cache types.Bool `tfsdk:"cache"`
NoPush types.Bool `tfsdk:"no_push"`
PushRetry types.Int64 `tfsdk:"push_retry"`
Reproducible types.Bool `tfsdk:"reproducible"`
Verbosity types.String `tfsdk:"verbosity"`
}
type runOptions struct {
ID string
GitRevision string
GitUsername string
GitPassword string
Context string
Dockerfile string
Destination string
BuildArg map[string]string
RegistryUsername string
RegistryPassword string
Cache bool
NoPush bool
Reproducible bool
PushRetry int64
Verbosity string
}
func getKanikoJob(namespace string, opts *runOptions) *apibatchv1.Job {
args := []string{
fmt.Sprintf("--dockerfile=%s", opts.Dockerfile),
fmt.Sprintf("--context=%s", opts.Context),
fmt.Sprintf("--destination=%s", opts.Destination),
fmt.Sprintf("--push-retry=%d", opts.PushRetry),
fmt.Sprintf("--verbosity=%s", opts.Verbosity),
}
var volumeMounts []apiv1.VolumeMount
var volumes []apiv1.Volume
if opts.RegistryUsername != "" && opts.RegistryPassword != "" {
volumeMounts = append(volumeMounts, apiv1.VolumeMount{
Name: "docker-config",
MountPath: "/kaniko/.docker/",
})
volumes = append(volumes, apiv1.Volume{
Name: "docker-config",
VolumeSource: apiv1.VolumeSource{
Secret: &apiv1.SecretVolumeSource{
SecretName: opts.ID,
},
},
})
}
return &apibatchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: opts.ID,
},
Spec: apibatchv1.JobSpec{
BackoffLimit: pointer.Int32(0),
TTLSecondsAfterFinished: pointer.Int32(3600),
Template: apiv1.PodTemplateSpec{
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "build",
Image: kanikoImage,
Args: args,
VolumeMounts: volumeMounts,
},
},
Volumes: volumes,
RestartPolicy: apiv1.RestartPolicyNever,
},
},
},
}
}
func getDockerConfigSecret(namespace, name, registry, username, password string) (*apiv1.Secret, error) {
cfg := DockerConfigJSON{
Auths: map[string]authn.AuthConfig{
registry: {
Username: username,
Password: password,
},
},
}
data, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
return &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Data: map[string][]byte{
"config.json": data,
},
}, nil
}
func kanikoBuild(ctx context.Context, restConfig *rest.Config, opts *runOptions) error {
coreV1Client, err := v1.NewForConfig(restConfig)
if err != nil {
return err
}
batchV1Client, err := batchv1.NewForConfig(restConfig)
if err != nil {
return err
}
namespace := defaultNamespace
if _, err = os.Stat(inClusterNamespaceFile); err == nil {
namespaceBytes, err := os.ReadFile(inClusterNamespaceFile)
if err == nil {
namespace = string(namespaceBytes)
}
}
ref, err := name.ParseReference(opts.Destination)
if err != nil {
return err
}
registry := fmt.Sprintf("https://%s/v1/", ref.Context().RegistryStr())
secret, err := getDockerConfigSecret(namespace, opts.ID, registry, opts.RegistryUsername, opts.RegistryPassword)
if err != nil {
return err
}
if _, err := coreV1Client.Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
return err
}
job := getKanikoJob(namespace, opts)
if _, err := batchV1Client.Jobs(namespace).Create(ctx, job, metav1.CreateOptions{}); err != nil {
return err
}
defer func() {
// Clean up.
fmt.Println("Cleaning up kaniko job")
if err = batchV1Client.Jobs(namespace).Delete(ctx, opts.ID, metav1.DeleteOptions{}); err != nil {
fmt.Println("failed to clean up kaniko job", map[string]any{"error": err})
tflog.Warn(ctx, "failed to clean up kaniko job", map[string]any{"error": err})
}
if err = coreV1Client.Secrets(namespace).Delete(ctx, opts.ID, metav1.DeleteOptions{}); err != nil {
fmt.Println("failed to clean up kaniko secret", map[string]any{"error": err})
tflog.Warn(ctx, "failed to clean up kaniko secret", map[string]any{"error": err})
}
}()
pw, err := batchV1Client.Jobs(namespace).Watch(ctx, metav1.ListOptions{})
if err != nil {
return err
}
for e := range pw.ResultChan() {
p, ok := e.Object.(*apibatchv1.Job)
// fmt.Println("p", p)
// fmt.Printf("%v\n", p)
if !ok {
tflog.Warn(ctx, "unexpected k8s resource event", map[string]any{"event": e})
fmt.Println("unexpected k8s resource event", map[string]any{"event": e})
continue
}
if p.Name != opts.ID {
fmt.Println("p.Name != opts.ID", map[string]any{"p.Name": p.Name, "opts.ID": opts.ID})
continue
}
if p.Status.CompletionTime != nil {
// Succeeded.
fmt.Println("Kaniko job completed successfully")
break
}
if p.Status.Failed > 0 {
logs, err := getJobPodsLogs(ctx, namespace, opts.ID, restConfig)
if err != nil {
fmt.Print("kaniko job failed, but cannot get pod logs: %w", err)
return fmt.Errorf("kaniko job failed, but cannot get pod logs: %w", err)
}
fmt.Println("kaniko job failed", map[string]any{"logs": logs})
return fmt.Errorf("build logs: %s", logs)
}
}
return nil
}
// getJobPodsLogs returns the logs of all pods of a job.
func getJobPodsLogs(ctx context.Context, namespace, jobName string, restConfig *rest.Config) (string, error) {
clientSet, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return "", err
}
ls := "job-name=" + jobName
pods, err := clientSet.CoreV1().Pods(namespace).
List(ctx, metav1.ListOptions{LabelSelector: ls})
if err != nil {
return "", err
}
var logs string
for _, pod := range pods.Items {
var podLogs []byte
podLogs, err = clientSet.CoreV1().Pods(namespace).GetLogs(pod.Name, &apiv1.PodLogOptions{}).DoRaw(ctx)
if err != nil {
return "", err
}
logs += string(podLogs)
}
return logs, nil
}
func main() {
// 创建 Kubernetes 集群的配置
// config, err := rest.InClusterConfig()
// if err != nil {
// fmt.Printf("Error creating in cluster config: %v\n", err)
// return
// }
// var config kanikoProviderModel
config, err := utils.LoadConfig("/Users/yangzun/.kube/config")
if err != nil {
fmt.Printf("Error creating in cluster config: %v\n", err.Error())
return
}
// Retrieve values from plan.
gitUsername := os.Getenv("GIT_USERNAME")
gitPassword := os.Getenv("GIT_PASSWORD")
registryUsername := os.Getenv("REGISTRY_USERNAME")
registryPassword := os.Getenv("REGISTRY_PASSWORD")
var pushRetry int64 = 5
verbosity := "debug"
id := fmt.Sprintf("kaniko-%s", utils.String(8))
opts := &runOptions{
ID: id,
GitPassword: gitPassword,
GitUsername: gitUsername,
RegistryUsername: registryUsername,
RegistryPassword: registryPassword,
Context: "git://gitlab-ee.treesir.pub/demotest/walrus/simple-web-service.git",
Dockerfile: "Dockerfile",
Destination: "harbor.treesir.pub/yangzun/kaniko-job-test:v1",
Cache: true,
NoPush: true,
PushRetry: pushRetry,
Reproducible: true,
Verbosity: verbosity,
}
// 创建 context 对象
ctx := context.TODO()
// 运行 kanikoBuild 函数
err = kanikoBuild(ctx, config, opts)
if err != nil {
fmt.Printf("Error running kanikoBuild: %v\n", err)
return
}
fmt.Println("Kaniko job completed successfully")
}