┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl explain pods KIND: Pod VERSION: v1 DESCRIPTION: Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts. FIELDS: apiVersion <string> .... kind <string> ..... metadata <Object> ..... spec <Object> ..... status <Object> .... ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl explain pods.metadata KIND: Pod VERSION: v1
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get ns NAME STATUS AGE default Active 8d kube-node-lease Active 8d kube-public Active 8d kube-system Active 8d liruilong Active 7d10h liruilong-pod-create Active 4m18s
kubectl run podcommon --image=nginx --image-pull-policy=IfNotPresent --labels="name=liruilong" --env="name=liruilong"
1 2 3 4 5 6 7
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl run podcommon --image=nginx --image-pull-policy=IfNotPresent --labels="name=liruilong" --env="name=liruilong" pod/podcommon created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods NAME READY STATUS RESTARTS AGE podcommon 0/1 ContainerCreating 0 12s
查看pod调度到了哪个节点
kubectl get pods -o wide
1 2 3 4 5 6 7 8 9 10
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl run pod-demo --image=nginx --labels=name=nginx --env="user=liruilong" --port=8888 --image-pull-policy=IfNotPresent pod/pod-demo created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod-demo 1/1 Running 0 94s 10.244.171.149 vms82.liruilongs.github.io <none> <none> poddemo 1/1 Running 0 8m22s 10.244.70.41 vms83.liruilongs.github.io <none> <none> ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$
删除pod
kubectl delete pod pod-demo --force
1 2 3 4 5 6 7 8
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl delete pod pod-demo --force warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "pod-demo" force deleted ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods | grep pod- ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$
每个Pod都有一个pause镜像
1 2 3 4 5
┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$ansible 192.168.26.83 -m shell -a "docker ps | grep podcomm" 192.168.26.83 | CHANGED | rc=0 >> c04e155aa25d nginx "/docker-entrypoint.…" 21 minutes ago Up 21 minutes k8s_podcommon_podcommon_liruilong-pod-create_dbfc4fcd-d62b-4339-9f15-0a48802f60ad_0 309925812d42 registry.aliyuncs.com/google_containers/pause:3.4.1 "/pause" 21 minutes ago Up 21 minutes k8s_POD_podcommon_liruilong-pod-create_dbfc4fcd-d62b-4339-9f15-0a48802f60ad_0
生成yaml文件的方式创建pod:-o yaml
kubectl run pod-demo --image=nginx --image-pull-policy=IfNotPresent --dry-run=client -o yaml >pod-demo.yaml
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl apply -f pod-demo.yaml pod/pod-demo created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod-demo 1/1 Running 0 27s 10.244.70.4 vms83.liruilongs.github.io <none> <none> podcommon 1/1 Running 0 13m 10.244.70.3 vms83.liruilongs.github.io <none> <none>
删除pod:delete pod
1 2 3 4 5 6 7
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl delete pod pod-demo pod "pod-demo" deleted ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES podcommon 1/1 Running 0 14m 10.244.70.3 vms83.liruilongs.github.io <none> <none>
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl apply -f comm-pod.yaml pod/comm-pod created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods NAME READY STATUS RESTARTS AGE comm-pod 2/2 Running 0 20s
镜像的下载策略
--image-pull-policy
Always 每次都下载最新镜像
Never 只使用本地镜像,从不下载
IfNotPresent 本地没有才下载
pod的重启策略
restartPolicy–单个容器正常退出
Always 总是重启
OnFailure 非正常退出才重启
Never 从不重启
labels 标签
k8s中每个资源对象都有标签
1 2 3 4 5 6 7 8 9 10
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get nodes --show-labels NAME STATUS ROLES AGE VERSION LABELS vms81.liruilongs.github.io Ready control-plane,master 8d v1.21.1 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=vms81.liruilongs.github.io,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers= vms82.liruilongs.github.io Ready <none> 8d v1.21.1 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=vms82.liruilongs.github.io,kubernetes.io/os=linux vms83.liruilongs.github.io Ready <none> 8d v1.21.1 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=vms83.liruilongs.github.io,kubernetes.io/os=linux ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods --show-labels NAME READY STATUS RESTARTS AGE LABELS podcommon 1/1 Running 0 87s name=liruilong
查看标签
1 2 3 4 5 6 7
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods --show-labels NAME READY STATUS RESTARTS AGE LABELS comm-pod 2/2 Running 0 4m43s run=comm-pod mysql-577h7 1/1 Running 0 93m app=mysql myweb-4xlc5 1/1 Running 0 92m app=myweb myweb-ltqdt 1/1 Running 0 91m app=myweb
指定标签过滤
1 2 3 4 5 6
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods -l run=comm-pod NAME READY STATUS RESTARTS AGE comm-pod 2/2 Running 0 5m12s ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$
pod的状态
pod的状态
–
Pending pod
因为某些原因(如资源不足、镜像未拉取、无法调度等),pod 已准备创建但尚未创建成功(卡住了)
Running pod
已经被调度到节点上,且容器工作正常
Completed pod
里所有容器正常退出
error/CrashLoopBackOff
创建的时候就出错,属于内部原因
imagePullBackoff
创建pod的时候,镜像下载失败
三、Pod的基本操作
在pod里执行命令,查看pod详细信息。查看pod日志
1 2 3 4 5 6
kubectl exec 命令 kubectl exec -it pod sh #如果pod里有多个容器,则命令是在第一个容器里执行 kubectl exec -it demo -c demo1 sh # 指定容器 kubectl describe pod pod名 kubectl logs pod名 -c 容器名 #如果有多个容器的话 查看日志。 kubectl edit pod pod名 # 部分可以修改,有些不能修改
┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$kubectl describe pod demo1 Name: demo1 Namespace: liruilong-pod-create Priority: 0 Node: vms83.liruilongs.github.io/192.168.26.83 Start Time: Wed, 20 Oct 2021 22:27:15 +0800 Labels: run=demo1 Annotations: cni.projectcalico.org/podIP: 10.244.70.32/32 cni.projectcalico.org/podIPs: 10.244.70.32/32 Status: Running IP: 10.244.70.32 IPs: IP: 10.244.70.32 Containers: demo1: Container ID: docker://0d644ad550f59029036fd73d420d4d2c651801dd12814bb26ad8e979dc0b59c1 Image: nginx Image ID: docker-pullable://nginx@sha256:644a70516a26004c97d0d85c7fe1d0c3a67ea8ab7ddf4aff193d9f301670cf36 Port: <none> Host Port: <none> State: Running Started: Wed, 20 Oct 2021 22:27:20 +0800 Ready: True Restart Count: 0 Environment: <none> Mounts: /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-scc89 (ro) Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True Volumes: kube-api-access-scc89: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: <nil> DownwardAPI: true QoS Class: BestEffort Node-Selectors: <none> Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 13m default-scheduler Successfully assigned liruilong-pod-create/demo1 to vms83.liruilongs.github.io Normal Pulled 13m kubelet Container image "nginx" already present on machine Normal Created 13m kubelet Created container demo1 Normal Started 13m kubelet Started container demo1
在pod里执行命令
kubectl exec -it demo1 -- ls /tmp
1 2 3 4 5 6
┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$kubectl exec -it demo1 -- sh # ls bin dev docker-entrypoint.sh home lib64 mnt proc run srv tmp var boot docker-entrypoint.d etc lib media opt root sbin sys usr # exit
1 2 3 4 5 6 7
┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$kubectl exec -it demo1 -- bash root@demo1:/# ls bin dev docker-entrypoint.sh home lib64 mnt proc run srv tmp var boot docker-entrypoint.d etc lib media opt root sbin sys usr root@demo1:/# exit exit
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl exec -it comm-pod -c comm-pod1 -- sh # ls bin boot dev docker-entrypoint.d docker-entrypoint.sh etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var # exit ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$#
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl apply -f demo.yaml pod/demo created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods NAME READY STATUS RESTARTS AGE demo 1/1 Running 0 21s ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl exec -it demo -- bin/bash root@demo:/# ls bin dev docker-entrypoint.sh home lib64 media opt root sbin sys usr boot docker-entrypoint.d etc lib liruilong mnt proc run srv tmp var root@demo:/# cat liruilong liruilongSun Nov 14 05:10:51 UTC 2021 root@demo:/#
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl apply -f pod_init.yaml pod/pod-init created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pod -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod-init 1/1 Running 0 11m 10.244.70.54 vms83.liruilongs.github.io <none> <none>
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl apply -f pod_init1.yaml pod/pod-init1 created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods pod-init1 NAME READY STATUS RESTARTS AGE pod-init1 1/1 Running 0 30s ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl exec -it pod-init1 /bin/sh kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Defaulted container "pod1-init" out of: pod1-init, init (init) # ls 2021 boot docker-entrypoint.d etc lib media opt root sbin sys usr bin dev docker-entrypoint.sh home lib64 mnt proc run srv tmp var # cd 2021;ls liruilong.txt #
┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$cat /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf # Note: This dropin only works with kubeadm and kubelet v1.11+ [Service] Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --pod-manifest-path=/etc/kubernetes/kubelet.d" Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" # This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env # This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use # the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. EnvironmentFile=-/etc/sysconfig/kubelet ExecStart= ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS$KUBELET_CONFIG_ARGS$KUBELET_KUBEADM_ARGS$KUBELET_EXTRA_ARGS ┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$mkdir -p /etc/kubernetes/kubelet.d
┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$kubectl get pod -n default NAME READY STATUS RESTARTS AGE pod-static-vms82.liruilongs.github.io 1/1 Running 0 8m17s pod-static-vms83.liruilongs.github.io 1/1 Running 0 9m3s ┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$ansible node -m shell -a "rm -rf /etc/kubernetes/kubelet.d/static-pod.yaml"
┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$cat /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf # Note: This dropin only works with kubeadm and kubelet v1.11+ [Service] Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf " Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" # This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env # This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use # the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. EnvironmentFile=-/etc/sysconfig/kubelet ExecStart= ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS$KUBELET_CONFIG_ARGS$KUBELET_KUBEADM_ARGS$KUBELET_EXTRA_ARGS ┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$grep static /var/lib/kubelet/config.yaml staticPodPath: /etc/kubernetes/manifests ┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$
┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$kubectl get node NAME STATUS ROLES AGE VERSION vms81.liruilongs.github.io Ready control-plane,master 45d v1.22.2 vms82.liruilongs.github.io Ready <none> 45d v1.22.2 vms83.liruilongs.github.io Ready <none> 45d v1.22.2
┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$kubectl get node NAME STATUS ROLES AGE VERSION vms81.liruilongs.github.io Ready control-plane,master 45d v1.22.2 vms82.liruilongs.github.io Ready worker1 45d v1.22.2 vms83.liruilongs.github.io Ready worker2 45d v1.22.2 ┌──[root@vms81.liruilongs.github.io]-[~/ansible] └─$
选择器(nodeSelector)方式
在特定节点上运行pod
1 2 3 4 5 6 7 8 9 10 11 12 13
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get nodes -l disktype=node2 NAME STATUS ROLES AGE VERSION vms83.liruilongs.github.io Ready worker2 45d v1.22.2 ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$vim pod-node2.yaml ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl apply -f pod-node2.yaml pod/podnode2 created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods -owide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES podnode2 1/1 Running 0 13m 10.244.70.60 vms83.liruilongs.github.io <none> <none>
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl apply -f pod-node-a.yaml pod/podnodea created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods NAME READY STATUS RESTARTS AGE podnodea 0/1 Pending 0 8s
我们修改一下
1 2 3 4 5 6 7 8 9 10 11
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$sed -i 's/vms84.liruilongs.github.io/vms83.liruilongs.github.io/' pod-node-a.yaml ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl apply -f pod-node-a.yaml pod/podnodea created ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods -owide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES podnodea 1/1 Running 0 13s 10.244.70.61 vms83.liruilongs.github.io <none> <none> ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get nodes NAME STATUS ROLES AGE VERSION vms81.liruilongs.github.io Ready control-plane,master 48d v1.22.2 vms82.liruilongs.github.io Ready worker1 48d v1.22.2 vms83.liruilongs.github.io Ready,SchedulingDisabled worker2 48d v1.22.2
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl uncordon vms83.liruilongs.github.io #恢复节点状态 node/vms83.liruilongs.github.io uncordoned ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get nodes NAME STATUS ROLES AGE VERSION vms81.liruilongs.github.io Ready control-plane,master 48d v1.22.2 vms82.liruilongs.github.io Ready worker1 48d v1.22.2 vms83.liruilongs.github.io Ready worker2 48d v1.22.2
删除所有的pod
1 2 3 4 5 6
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl scale deployment nginx --replicas=0 deployment.apps/nginx scaled ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get pods -o wide No resources found in liruilong-pod-create namespace.
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl drain vms82.liruilongs.github.io node/vms82.liruilongs.github.io cordoned DEPRECATED WARNING: Aborting the drain command in a list of nodes will be deprecated in v1.23. The new behavior will make the drain command go through all nodes even if one or more nodes failed during the drain. For now, users can try such experience via: --ignore-errors error: unable to drain node "vms82.liruilongs.github.io", aborting command...
There are pending nodes to be drained: vms82.liruilongs.github.io cannot delete DaemonSet-managed Pods (use --ignore-daemonsets to ignore): kube-system/calico-node-ntm7v, kube-system/kube-proxy-nzm24 cannot delete Pods with local storage (use --delete-emptydir-data to override): kube-system/metrics-server-bcfb98c76-wxv5l ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get nodes NAME STATUS ROLES AGE VERSION vms81.liruilongs.github.io Ready control-plane,master 48d v1.22.2 vms82.liruilongs.github.io Ready,SchedulingDisabled worker1 48d v1.22.2 vms83.liruilongs.github.io Ready worker2 48d v1.22.2
uncordon掉刚才的节点
1 2 3 4 5 6 7 8 9
┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl uncordon vms82.liruilongs.github.io node/vms82.liruilongs.github.io uncordoned ┌──[root@vms81.liruilongs.github.io]-[~/ansible/k8s-pod-create] └─$kubectl get nodes NAME STATUS ROLES AGE VERSION vms81.liruilongs.github.io Ready control-plane,master 48d v1.22.2 vms82.liruilongs.github.io Ready worker1 48d v1.22.2 vms83.liruilongs.github.io Ready worker2 48d v1.22.2