package main

import (
	"context"
	"flag"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/google/uuid"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

// buildConfig loads the cluster configuration: from the given kubeconfig
// file if one is provided, otherwise from the in-cluster environment.
func buildConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig != "" {
		cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
		if err != nil {
			return nil, err
		}
		return cfg, nil
	}
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return cfg, nil
}

func main() {
	klog.InitFlags(nil)

	var kubeconfig string
	var leaseLockName string
	var leaseLockNamespace string
	var id string

	// kubeconfig is the path to the Kubernetes cluster configuration file
	flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
	// identity of the lock holder; if none is passed in, generate a random one
	flag.StringVar(&id, "id", uuid.New().String(), "the holder identity name")
	// name of the lock, which becomes the name of the Lease resource in Kubernetes
	flag.StringVar(&leaseLockName, "lease-lock-name", "", "the lease lock resource name")
	// namespace of the lock
	flag.StringVar(&leaseLockNamespace, "lease-lock-namespace", "", "the lease lock resource namespace")
	// parse the command-line flags
	flag.Parse()

	if leaseLockName == "" {
		klog.Fatal("unable to get lease lock resource name (missing lease-lock-name flag).")
	}
	if leaseLockNamespace == "" {
		klog.Fatal("unable to get lease lock resource namespace (missing lease-lock-namespace flag).")
	}

	// leader election uses the Kubernetes API by writing to a
	// lock object, which can be a LeaseLock object (preferred),
	// a ConfigMap, or an Endpoints (deprecated) object.
	// Conflicting writes are detected and each client handles those actions
	// independently.
	config, err := buildConfig(kubeconfig)
	if err != nil {
		klog.Fatal(err)
	}
	// build a client for the Kubernetes cluster; die if that fails
	client := clientset.NewForConfigOrDie(config)

	// stand-in for the controller's logic
	run := func(ctx context.Context) {
		// complete your controller loop here
		klog.Info("Controller loop...")

		// block forever
		select {}
	}

	// use a Go context so we can tell the leaderelection code when we
	// want to step down
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// listen for interrupts or the Linux SIGTERM signal and cancel
	// our context, which the leader election code will observe and
	// step down; the process then exits
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-ch
		klog.Info("Received termination, signaling shutdown")
		cancel()
	}()

	// we use the Lease lock type since edits to Leases are less common
	// and fewer objects in the cluster watch "all Leases".
	// build the lock from the flags; a Lease resource serves as the lock
	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      leaseLockName,
			Namespace: leaseLockNamespace,
		},
		// the client that ties the lock to the cluster
		Client: client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: id,
		},
	}

	// start the leader election code loop.
	// note that the election loop is started with ctx: if the matching
	// cancel function is called, the election ends as well
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		// the lock used for the election
		Lock: lock,
		// IMPORTANT: you MUST ensure that any code you have that
		// is protected by the lease must terminate **before**
		// you call cancel. Otherwise, you could have a background
		// loop still running and another process could
		// get elected before your background loop finished, violating
		// the stated goal of the lease.
		// voluntarily give up leadership when ctx is canceled
		ReleaseOnCancel: true,
		LeaseDuration:   60 * time.Second, // term of the lease: if the leader fails to renew within 60s, the lock is released and a new election starts
		RenewDeadline:   15 * time.Second, // timeout of a renew request
		RetryPeriod:     5 * time.Second,  // interval at which the leader renews leadership, and at which non-leaders retry acquiring the lock (with a 1.2 jitter factor; see the source)
		// register the callbacks
		Callbacks: leaderelection.LeaderCallbacks{
			// called once this instance becomes the leader
			OnStartedLeading: func(ctx context.Context) {
				// we're notified when we start - this is where you would
				// usually put your code; here we run the controller logic
				run(ctx)
			},
			// called when this instance loses or gives up leadership;
			// we can do cleanup here
			OnStoppedLeading: func() {
				klog.Infof("leader lost: %s", id)
				os.Exit(0)
			},
			// we're notified when a new leader is elected
			OnNewLeader: func(identity string) {
				if identity == id {
					// I just got the lock
					return
				}
				klog.Infof("new leader elected: %s", identity)
			},
		},
	})
}
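Since the LeaseLock above is persisted as an ordinary coordination.k8s.io/v1 Lease object, you can read it back with the same clientset to see who currently holds the lock. Below is a minimal sketch, not part of the upstream example: the flag names simply mirror the ones above, and it assumes the election is already running against the same Lease.

package main

import (
	"context"
	"flag"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	name := flag.String("lease-lock-name", "", "the lease lock resource name")
	namespace := flag.String("lease-lock-namespace", "", "the lease lock resource namespace")
	flag.Parse()

	// load the cluster configuration from the kubeconfig file
	cfg, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err)
	}
	client := clientset.NewForConfigOrDie(cfg)

	// the election stores its state in a plain Lease object,
	// so it can be fetched like any other resource
	lease, err := client.CoordinationV1().Leases(*namespace).Get(context.TODO(), *name, metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if lease.Spec.HolderIdentity != nil {
		fmt.Printf("current leader: %s\n", *lease.Spec.HolderIdentity)
	}
	if lease.Spec.RenewTime != nil {
		fmt.Printf("last renewed:   %s\n", lease.Spec.RenewTime.Time)
	}
}

The same state is visible with kubectl: `kubectl get lease <lease-lock-name> -n <lease-lock-namespace> -o yaml` shows the holderIdentity and renewTime fields being updated by the current leader on every renewal.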