Completed - a Completed Pod no longer consumes CPU, memory, or other node resources; only its manifest is retained, and the scheduler no longer takes Completed Pods into account.
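For reference, a minimal job.yaml consistent with the output below (5 completions of the classic perl bpi example) could look like the sketch here; the parallelism value and the digit count are assumptions, not taken from the original file:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  completions: 5              # matches the 5/5 COMPLETIONS shown below
  parallelism: 2              # assumed; the real value is not shown
  template:
    spec:
      restartPolicy: Never    # Job Pods finish and stay Completed instead of restarting
      containers:
        - name: pi
          image: perl
          command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]   # digit count assumed
```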
$ k apply -f job.yaml
job.batch/pi created

$ k get job -owide
NAME   COMPLETIONS   DURATION   AGE    CONTAINERS   IMAGES   SELECTOR
pi     5/5           21s        102s   pi           perl     controller-uid=b89bbfc4-a2fc-410a-ab5e-859e83c4aef6

$ k get po -owide
NAME          READY   STATUS      RESTARTS   AGE   IP               NODE      NOMINATED NODE   READINESS GATES
pi--1-4fdfd   0/1     Completed   0          29s   192.168.185.19   mac-k8s   <none>           <none>
pi--1-76mmd   0/1     Completed   0          44s   192.168.185.15   mac-k8s   <none>           <none>
pi--1-cxgrn   0/1     Completed   0          38s   192.168.185.17   mac-k8s   <none>           <none>
pi--1-d4n7b   0/1     Completed   0          44s   192.168.185.16   mac-k8s   <none>           <none>
pi--1-fgxbs   0/1     Completed   0          36s   192.168.185.18   mac-k8s   <none>           <none>
$ k logs pi--1-4fdfd 3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989380952572010654858632788659361533818279682303019520353018529689957736225994138912497217752834791315155748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035637076601047101819429555961989467678374494482553797747268471040475346462080466842590694912933136770289891521047521620569660240580381501935112533824300355876402474964732639141992726042699227967823547816360093417216412199245863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818347977535663698074265425278625518184175746728909777727938000816470600161452491921732172147723501414419735685481613611573525521334757418494684385233239073941433345477624168625189835694855620992192221842725502542568876717904946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886269456042419652850222106611863067442786220391949450471237137869609563643719172874677646575739624138908658326459958133904780275901
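For reference, a statefulset.yaml matching the objects created below (a one-replica nginx StatefulSet plus its headless governing Service) could be sketched as follows; field values are taken from the command output, the rest is an assumption:

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nginx-sts
spec:
  serviceName: nginx-sts      # governing headless Service
  replicas: 1
  selector:
    matchLabels:
      app: nginx-sts
  template:
    metadata:
      labels:
        app: nginx-sts
    spec:
      containers:
        - name: nginx
          image: nginx
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-sts
spec:
  clusterIP: None             # headless, as seen in the svc listing (CLUSTER-IP None)
  selector:
    app: nginx-sts
  ports:
    - port: 80
```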
$ k create -f statefulset.yaml
statefulset.apps/nginx-sts created
service/nginx-sts created

$ k get sts -owide
NAME        READY   AGE    CONTAINERS   IMAGES
nginx-sts   1/1     2m1s   nginx        nginx

$ k get svc -owide
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE    SELECTOR
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   2d2h   <none>
nginx-sts    ClusterIP   None         <none>        80/TCP    33s    app=nginx-sts

$ k get po -owide
NAME          READY   STATUS    RESTARTS   AGE   IP              NODE      NOMINATED NODE   READINESS GATES
nginx-sts-0   1/1     Running   0          17s   192.168.185.8   mac-k8s   <none>           <none>

$ k scale sts nginx-sts --replicas=2
statefulset.apps/nginx-sts scaled

$ k get po -owide
NAME          READY   STATUS    RESTARTS   AGE   IP              NODE      NOMINATED NODE   READINESS GATES
nginx-sts-0   1/1     Running   0          14m   192.168.185.8   mac-k8s   <none>           <none>
nginx-sts-1   1/1     Running   0          14s   192.168.185.9   mac-k8s   <none>           <none>
Pod - running k get po nginx-sts-0 -oyaml shows a controller-revision-hash label on the Pod, which records the ControllerRevision it was created from.
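For reference, the label section of that Pod would look roughly like the fragment below; the hash value is the name of the ControllerRevision listed further down:

```yaml
metadata:
  name: nginx-sts-0
  labels:
    app: nginx-sts
    controller-revision-hash: nginx-sts-6798d68dcd      # ControllerRevision the Pod was stamped from
    statefulset.kubernetes.io/pod-name: nginx-sts-0
```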
$ k get po -owide
NAME          READY   STATUS    RESTARTS   AGE   IP              NODE      NOMINATED NODE   READINESS GATES
nginx-sts-0   1/1     Running   0          29m   192.168.185.8   mac-k8s   <none>           <none>
nginx-sts-1   1/1     Running   0          14m   192.168.185.9   mac-k8s   <none>           <none>
// create the controller so we can inject the enqueue function
namespaceController := &NamespaceController{
	queue:                      workqueue.NewNamedRateLimitingQueue(nsControllerRateLimiter(), "namespace"),
	namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(ctx, kubeClient.CoreV1().Namespaces(), metadataClient, kubeClient.CoreV1(), discoverResourcesFn, finalizerToken),
}
// enqueueNamespace adds an object to the controller work queue
// obj could be an *v1.Namespace, or a DeletionFinalStateUnknown item.
func (nm *NamespaceController) enqueueNamespace(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
		return
	}

	namespace := obj.(*v1.Namespace)
	// don't queue if we aren't deleted
	if namespace.DeletionTimestamp == nil || namespace.DeletionTimestamp.IsZero() {
		return
	}

	// delay processing namespace events to allow HA api servers to observe namespace deletion,
	// and HA etcd servers to observe last minute object creations inside the namespace
	nm.queue.AddAfter(key, namespaceDeletionGracePeriod)
}
// worker processes the queue of namespace objects.
// Each namespace can be in the queue at most once.
// The system ensures that no two workers can process
// the same namespace at the same time.
func (nm *NamespaceController) worker(ctx context.Context) {
	workFunc := func(ctx context.Context) bool {
		key, quit := nm.queue.Get()
		if quit {
			return true
		}
		defer nm.queue.Done(key)

		err := nm.syncNamespaceFromKey(ctx, key.(string))
		if err == nil {
			// no error, forget this entry and return
			nm.queue.Forget(key)
			return false
		}

		if estimate, ok := err.(*deletion.ResourcesRemainingError); ok {
			t := estimate.Estimate/2 + 1
			klog.FromContext(ctx).V(4).Info("Content remaining in namespace", "namespace", key, "waitSeconds", t)
			nm.queue.AddAfter(key, time.Duration(t)*time.Second)
		} else {
			// rather than wait for a full resync, re-add the namespace to the queue to be processed
			nm.queue.AddRateLimited(key)
			utilruntime.HandleError(fmt.Errorf("deletion of namespace %v failed: %v", key, err))
		}
		return false
	}

	for {
		quit := workFunc(ctx)
		if quit {
			return
		}
	}
}
namespace_controller.go
// syncNamespaceFromKey looks for a namespace with the specified key in its store and synchronizes it
func (nm *NamespaceController) syncNamespaceFromKey(ctx context.Context, key string) (err error) {
	startTime := time.Now()
	logger := klog.FromContext(ctx)
	defer func() {
		logger.V(4).Info("Finished syncing namespace", "namespace", key, "duration", time.Since(startTime))
	}()

	namespace, err := nm.lister.Get(key)
	if errors.IsNotFound(err) {
		logger.Info("Namespace has been deleted", "namespace", key)
		return nil
	}
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Unable to retrieve namespace %v from store: %v", key, err))
		return err
	}
	return nm.namespacedResourcesDeleter.Delete(ctx, namespace.Name)
}
namespace_controller.go
// Run starts observing the system with the specified number of workers.
func (nm *NamespaceController) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer nm.queue.ShutDown()
	logger := klog.FromContext(ctx)
	logger.Info("Starting namespace controller")
	defer logger.Info("Shutting down namespace controller")

	if !cache.WaitForNamedCacheSync("namespace", ctx.Done(), nm.listerSynced) {
		return
	}

	logger.V(5).Info("Starting workers of namespace controller")
	for i := 0; i < workers; i++ {
		go wait.UntilWithContext(ctx, nm.worker, time.Second)
	}
	<-ctx.Done()
}
// Delete deletes all resources in the given namespace.
// Before deleting resources:
//   - It ensures that deletion timestamp is set on the
//     namespace (does nothing if deletion timestamp is missing).
//   - Verifies that the namespace is in the "terminating" phase
//     (updates the namespace phase if it is not yet marked terminating)
//
// After deleting the resources:
// * It removes finalizer token from the given namespace.
//
// Returns an error if any of those steps fail.
// Returns ResourcesRemainingError if it deleted some resources but needs
// to wait for them to go away.
// Caller is expected to keep calling this until it succeeds.
func (d *namespacedResourcesDeleter) Delete(ctx context.Context, nsName string) error {
	// Multiple controllers may edit a namespace during termination
	// first get the latest state of the namespace before proceeding
	// if the namespace was deleted already, don't do anything
	namespace, err := d.nsClient.Get(context.TODO(), nsName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}
	if namespace.DeletionTimestamp == nil {
		return nil
	}

	// ensure that the status is up to date on the namespace
	// if we get a not found error, we assume the namespace is truly gone
	namespace, err = d.retryOnConflictError(namespace, d.updateNamespaceStatusFunc)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}

	// the latest view of the namespace asserts that namespace is no longer deleting..
	if namespace.DeletionTimestamp.IsZero() {
		return nil
	}

	// return if it is already finalized.
	if finalized(namespace) {
		return nil
	}

	// there may still be content for us to remove
	estimate, err := d.deleteAllContent(ctx, namespace)
	if err != nil {
		return err
	}
	if estimate > 0 {
		return &ResourcesRemainingError{estimate}
	}

	// we have removed content, so mark it finalized by us
	_, err = d.retryOnConflictError(namespace, d.finalizeNamespace)
	if err != nil {
		// in normal practice, this should not be possible, but if a deployment is running
		// two controllers to do namespace deletion that share a common finalizer token it's
		// possible that a not found could occur since the other controller would have finished the delete.
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}
	return nil
}
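For the DaemonSet experiment below, a minimal daemonset.yaml consistent with the output (selector app=nginx, a single nginx container, no node selector) could be sketched as:

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
```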
$ k apply -f daemonset.yaml
daemonset.apps/nginx-ds created

$ k get ds -owide
NAME       DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE   CONTAINERS   IMAGES   SELECTOR
nginx-ds   1         1         1       1            1           <none>          15s   nginx        nginx    app=nginx

$ k describe ds nginx-ds
Name:           nginx-ds
Selector:       app=nginx
Node-Selector:  <none>
Labels:         <none>
Annotations:    deprecated.daemonset.template.generation: 1
Desired Number of Nodes Scheduled: 1
Current Number of Nodes Scheduled: 1
Number of Nodes Scheduled with Up-to-date Pods: 1
Number of Nodes Scheduled with Available Pods: 1
Number of Nodes Misscheduled: 0
Pods Status:  1 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=nginx
  Containers:
   nginx:
    Image:        nginx
    Port:         <none>
    Host Port:    <none>
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Events:
  Type    Reason            Age   From                  Message
  ----    ------            ----  ----                  -------
  Normal  SuccessfulCreate  62s   daemonset-controller  Created pod: nginx-ds-8frrn

$ k get po -owide
NAME             READY   STATUS    RESTARTS   AGE   IP               NODE      NOMINATED NODE   READINESS GATES
nginx-ds-8frrn   1/1     Running   0          30s   192.168.185.10   mac-k8s   <none>           <none>
Pod - running k get po nginx-ds-8frrn -oyaml shows the same kind of controller-revision-hash label, this time pointing at the DaemonSet's current ControllerRevision.
$ k get controllerrevisions.apps
NAME                   CONTROLLER                   REVISION   AGE
nginx-ds-6799fc88d8    daemonset.apps/nginx-ds      1          19m
nginx-sts-6798d68dcd   statefulset.apps/nginx-sts   1          13s
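A ControllerRevision is essentially an immutable snapshot of the workload's Pod template plus a revision number; a trimmed sketch of the StatefulSet's revision (data content abbreviated and illustrative) might look like:

```yaml
apiVersion: apps/v1
kind: ControllerRevision
metadata:
  name: nginx-sts-6798d68dcd
  ownerReferences:
    - apiVersion: apps/v1
      kind: StatefulSet
      name: nginx-sts
revision: 1
data:
  # raw snapshot of the StatefulSet's Pod template at this revision (abbreviated)
  spec:
    template:
      metadata:
        labels:
          app: nginx-sts
      spec:
        containers:
          - name: nginx
            image: nginx
```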
$ k api-resources --api-group='coordination.k8s.io'
NAME     SHORTNAMES   APIVERSION               NAMESPACED   KIND
leases                coordination.k8s.io/v1   true         Lease

$ k get leases.coordination.k8s.io -A
NAMESPACE         NAME                      HOLDER                                         AGE
kube-node-lease   mac-k8s                   mac-k8s                                        2d4h
kube-system       kube-controller-manager   mac-k8s_9cf04af5-7c44-4873-83b5-695cc4f42263   2d4h
kube-system       kube-scheduler            mac-k8s_085b5652-9b3f-49d5-bde2-02932851bbd7   2d4h
tigera-operator   operator-lock             mac-k8s_16ca74c2-4970-455f-9d35-2ef722aecf48   2d4h
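Each Lease is a small coordination.k8s.io/v1 object whose spec carries the leader-election (or node heartbeat) state; a trimmed sketch of the kube-controller-manager Lease, with illustrative timestamps and duration, would be:

```yaml
apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  holderIdentity: mac-k8s_9cf04af5-7c44-4873-83b5-695cc4f42263   # current leader, as in the HOLDER column above
  leaseDurationSeconds: 15                                       # illustrative
  acquireTime: "2022-05-01T08:00:00.000000Z"                     # illustrative timestamps
  renewTime: "2022-05-03T10:00:00.000000Z"
  leaseTransitions: 0
```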