package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	v1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/utils/pointer"

	"github.com/seal-io/terraform-provider-kaniko/utils"
)

const (
	defaultNamespace = "default"

	// kanikoImage is the kaniko executor image used for the build pod.
	kanikoImage = "gcr.io/kaniko-project/executor:v1.5.1"

	// inClusterNamespaceFile holds the namespace of the current pod when
	// running inside a cluster.
	inClusterNamespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
)
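
// DockerConfigJSON mirrors the subset of Docker's ~/.docker/config.json
// layout that kaniko needs for registry authentication.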
type DockerConfigJSON struct {
	// The lowercase "auths" key is what the Docker config.json format expects.
	Auths map[string]authn.AuthConfig `json:"auths"`
}
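
// imageResourceModel is the Terraform schema model for the image resource;
// it is the plan-side counterpart of runOptions.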
type imageResourceModel struct {
	ID          types.String `tfsdk:"id"`
	GitUsername types.String `tfsdk:"git_username"`
	GitPassword types.String `tfsdk:"git_password"`

	Context          types.String `tfsdk:"context"`
	Dockerfile       types.String `tfsdk:"dockerfile"`
	Destination      types.String `tfsdk:"destination"`
	BuildArg         types.Map    `tfsdk:"build_arg"`
	RegistryUsername types.String `tfsdk:"registry_username"`
	RegistryPassword types.String `tfsdk:"registry_password"`
	Cache            types.Bool   `tfsdk:"cache"`
	NoPush           types.Bool   `tfsdk:"no_push"`
	PushRetry        types.Int64  `tfsdk:"push_retry"`
	Reproducible     types.Bool   `tfsdk:"reproducible"`
	Verbosity        types.String `tfsdk:"verbosity"`
}
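
// runOptions carries the resolved values used to run a single kaniko build.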
type runOptions struct {
	ID          string
	GitRevision string
	GitUsername string
	GitPassword string

	Context          string
	Dockerfile       string
	Destination      string
	BuildArg         map[string]string
	RegistryUsername string
	RegistryPassword string
	Cache            bool
	NoPush           bool
	Reproducible     bool
	PushRetry        int64
	Verbosity        string
}
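
// getKanikoPod builds the pod spec that runs the kaniko executor with the
// given options. Registry credentials, when present, are mounted from the
// secret named after the run ID.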
func getKanikoPod(namespace string, opts *runOptions) *apiv1.Pod {
	args := []string{
		fmt.Sprintf("--dockerfile=%s", opts.Dockerfile),
		fmt.Sprintf("--context=%s", opts.Context),
		fmt.Sprintf("--destination=%s", opts.Destination),
		fmt.Sprintf("--push-retry=%d", opts.PushRetry),
		fmt.Sprintf("--verbosity=%s", opts.Verbosity),
	}

	// Wire up the remaining options, which were previously declared but never
	// passed to the executor; they map to kaniko's --cache, --no-push,
	// --reproducible, and repeatable --build-arg flags.
	if opts.Cache {
		args = append(args, "--cache=true")
	}
	if opts.NoPush {
		args = append(args, "--no-push")
	}
	if opts.Reproducible {
		args = append(args, "--reproducible")
	}
	for k, v := range opts.BuildArg {
		args = append(args, fmt.Sprintf("--build-arg=%s=%s", k, v))
	}

	// Kaniko reads GIT_USERNAME/GIT_PASSWORD from the environment when
	// cloning a private git:// build context.
	var envs []apiv1.EnvVar
	if opts.GitUsername != "" {
		envs = append(envs, apiv1.EnvVar{Name: "GIT_USERNAME", Value: opts.GitUsername})
	}
	if opts.GitPassword != "" {
		envs = append(envs, apiv1.EnvVar{Name: "GIT_PASSWORD", Value: opts.GitPassword})
	}

	// Mount the Docker config secret so kaniko can authenticate to the
	// destination registry.
	var volumeMounts []apiv1.VolumeMount
	var volumes []apiv1.Volume
	if opts.RegistryUsername != "" && opts.RegistryPassword != "" {
		volumeMounts = append(volumeMounts, apiv1.VolumeMount{
			Name:      "docker-config",
			MountPath: "/kaniko/.docker/",
		})
		volumes = append(volumes, apiv1.Volume{
			Name: "docker-config",
			VolumeSource: apiv1.VolumeSource{
				Secret: &apiv1.SecretVolumeSource{
					SecretName: opts.ID,
				},
			},
		})
	}

	return &apiv1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      opts.ID,
		},
		Spec: apiv1.PodSpec{
			Containers: []apiv1.Container{
				{
					Name:         "build",
					Image:        kanikoImage,
					Env:          envs,
					VolumeMounts: volumeMounts,
					Args:         args,
				},
			},
			Volumes:       volumes,
			RestartPolicy: apiv1.RestartPolicyNever,
		},
	}
}
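
// getDockerConfigSecret renders a Docker config.json for the given registry
// credentials and wraps it in a Kubernetes secret.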
func getDockerConfigSecret(namespace, name, registry, username, password string) (*apiv1.Secret, error) {
	cfg := DockerConfigJSON{
		Auths: map[string]authn.AuthConfig{
			registry: {
				Username: username,
				Password: password,
			},
		},
	}

	data, err := json.Marshal(cfg)
	if err != nil {
		return nil, err
	}

	return &apiv1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Data: map[string][]byte{
			"config.json": data,
		},
	}, nil
}
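
// kanikoBuild creates the credentials secret and the kaniko pod, then watches
// the pod until it reaches a terminal phase. The pod and secret are cleaned
// up on return.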
func kanikoBuild(ctx context.Context, restConfig *rest.Config, opts *runOptions, nowTime time.Time) error {
	coreV1Client, err := v1.NewForConfig(restConfig)
	if err != nil {
		return err
	}

	// Prefer the namespace of the surrounding pod when running in-cluster;
	// otherwise fall back to the default namespace.
	namespace := defaultNamespace
	if _, err = os.Stat(inClusterNamespaceFile); err == nil {
		namespaceBytes, err := os.ReadFile(inClusterNamespaceFile)
		if err == nil {
			namespace = strings.TrimSpace(string(namespaceBytes))
		}
	}

	ref, err := name.ParseReference(opts.Destination)
	if err != nil {
		return err
	}

	registry := fmt.Sprintf("https://%s/v1/", ref.Context().RegistryStr())

	secret, err := getDockerConfigSecret(namespace, opts.ID, registry, opts.RegistryUsername, opts.RegistryPassword)
	if err != nil {
		return err
	}

	if _, err := coreV1Client.Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
		return err
	}

	pod := getKanikoPod(namespace, opts)
	if _, err := coreV1Client.Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
		return err
	}

	defer func() {
		// Clean up the build pod and the credentials secret.
		fmt.Println("Cleaning up kaniko pod and secret")

		if err = coreV1Client.Pods(namespace).Delete(ctx, opts.ID, metav1.DeleteOptions{}); err != nil {
			fmt.Printf("failed to clean up kaniko pod: %v\n", err)
			tflog.Warn(ctx, "failed to clean up kaniko pod", map[string]any{"error": err})
		}

		if err = coreV1Client.Secrets(namespace).Delete(ctx, opts.ID, metav1.DeleteOptions{}); err != nil {
			fmt.Printf("failed to clean up kaniko secret: %v\n", err)
			tflog.Warn(ctx, "failed to clean up kaniko secret", map[string]any{"error": err})
		}

		// Print the total elapsed time.
		fmt.Println("Total time:", time.Since(nowTime))
	}()

	watchCount := 0

OuterLoop:
	for {
		// Each watch times out after 600 seconds (10 minutes); the outer loop
		// re-establishes it until the pod finishes.
		watch, err := coreV1Client.Pods(namespace).Watch(ctx, metav1.ListOptions{
			FieldSelector:  "metadata.name=" + opts.ID,
			TimeoutSeconds: pointer.Int64Ptr(600),
		})
		if err != nil {
			return err
		}

		ch := watch.ResultChan()
		for event := range ch {
			watchCount++
			fmt.Println("watchCount=", watchCount)

			pod, ok := event.Object.(*apiv1.Pod)
			if !ok {
				fmt.Println("unexpected type")
				continue
			}

			// Exit the loop once the pod has failed or succeeded.
			if pod.Status.Phase == apiv1.PodFailed || pod.Status.Phase == apiv1.PodSucceeded {
				fmt.Printf("Pod %s finished with status %s\n", opts.ID, pod.Status.Phase)

				// Print the pod logs.
				podLogs, _ := getJobPodsLogs(ctx, namespace, opts.ID, restConfig)
				fmt.Println("podLogs=", podLogs)

				break OuterLoop
			}

			fmt.Printf("Pod %s still running with status %s, time: %s\n", opts.ID, pod.Status.Phase, time.Since(nowTime))
		}
	}

	return nil
}
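
// main is a standalone test harness: it loads a local kubeconfig, assembles
// runOptions from environment variables, and runs a single kaniko build.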
func main() {
	// Record the start time for the total-time report.
	nowTime := time.Now()

	// Build the Kubernetes client config from a local kubeconfig. When
	// running in-cluster, use rest.InClusterConfig() instead:
	//
	//	config, err := rest.InClusterConfig()
	//	if err != nil {
	//		fmt.Printf("Error creating in-cluster config: %v\n", err)
	//		return
	//	}
	config, err := utils.LoadConfig("/Users/yangzun/.kube/config")
	if err != nil {
		fmt.Printf("Error loading kubeconfig: %v\n", err)
		return
	}

	// Retrieve credentials from the environment.
	gitUsername := os.Getenv("GIT_USERNAME")
	gitPassword := os.Getenv("GIT_PASSWORD")
	registryUsername := os.Getenv("REGISTRY_USERNAME")
	registryPassword := os.Getenv("REGISTRY_PASSWORD")

	var pushRetry int64 = 5
	verbosity := "debug"

	// Give each run a unique ID; it names both the pod and the secret.
	id := fmt.Sprintf("kaniko-%s", utils.String(8))

	opts := &runOptions{
		ID:               id,
		GitPassword:      gitPassword,
		GitUsername:      gitUsername,
		RegistryUsername: registryUsername,
		RegistryPassword: registryPassword,
		Context:          "git://gitlab-ee.treesir.pub/demotest/walrus/simple-web-service.git",
		Dockerfile:       "Dockerfile",
		Destination:      "harbor.treesir.pub/yangzun/kaniko-job-test:v1",
		Cache:            true,
		NoPush:           true,
		PushRetry:        pushRetry,
		Reproducible:     true,
		Verbosity:        verbosity,
	}

	ctx := context.TODO()

	// Run the build.
	err = kanikoBuild(ctx, config, opts, nowTime)
	if err != nil {
		fmt.Printf("Error running kanikoBuild: %v\n", err)
		return
	}

	fmt.Println("Kaniko job completed successfully")
}
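
// getJobPodsLogs returns the concatenated logs of all pods in the namespace
// whose name matches the given pod name.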
func getJobPodsLogs(ctx context.Context, namespace, pod string, restConfig *rest.Config) (string, error) {
	clientSet, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return "", err
	}

	pods, err := clientSet.CoreV1().Pods(namespace).
		List(ctx, metav1.ListOptions{
			FieldSelector: "metadata.name=" + pod,
		})
	if err != nil {
		return "", err
	}

	var logs string

	for _, p := range pods.Items {
		podLogs, err := clientSet.CoreV1().Pods(namespace).GetLogs(p.Name, &apiv1.PodLogOptions{}).DoRaw(ctx)
		if err != nil {
			return "", err
		}

		logs += string(podLogs)
	}

	return logs, nil
}