共计 7941 个字符,预计需要花费 20 分钟才能阅读完成。
在 kubernetes 集群中,每个 Node 节点都会启动 kubelet 进程,用来处理 Master 节点下发到本节点的任务,管理 Pod 和其中的容器。kubelet 会在 API Server 上注册节点信息,定期向 Master 汇报节点资源使用情况,并通过 cAdvisor 监控容器和节点资源。可以把 kubelet 理解成【Server-Agent】架构中的 agent,是 Node 上的 pod 管家。
看到上面的介绍,其实也就是脏活累活由它去干。
签发 kubelet 证书
** 运维主机 hdss7-200 上,注意一定要多预留你的 ip 地址,因为以后随着业务的增长,节点数目是会变多的。最好有预期,
但是现在生产上也不采用这种方式部署。只是方便我们学习组件的通讯过程以及作用。**
cd /opt/certs
[root@hdss7-200 certs]# vim kubelet-csr.json
{
"CN": "k8s-kubelet",
"hosts": [
"127.0.0.1",
"10.4.7.10",
"10.4.7.21",
"10.4.7.22",
"10.4.7.23",
"10.4.7.24",
"10.4.7.25",
"10.4.7.26",
"10.4.7.27",
"10.4.7.28"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet
[root@hdss7-200 certs]# ll
total 84
-rw-r--r--. 1 root root 1249 Apr 21 14:35 apiserver.csr
-rw-r--r--. 1 root root 566 Apr 21 14:09 apiserver-csr.json
-rw-------. 1 root root 1675 Apr 21 14:35 apiserver-key.pem
-rw-r--r--. 1 root root 1598 Apr 21 14:35 apiserver.pem
-rw-r--r--. 1 root root 840 Apr 20 10:25 ca-config.json
-rw-r--r--. 1 root root 993 Apr 18 17:47 ca.csr
-rw-r--r--. 1 root root 332 Apr 18 17:40 ca-csr.json
-rw-------. 1 root root 1675 Apr 18 17:47 ca-key.pem
-rw-r--r--. 1 root root 1346 Apr 18 17:47 ca.pem
-rw-r--r--. 1 root root 993 Apr 21 11:36 client.csr
-rw-r--r--. 1 root root 280 Apr 21 11:36 client-csr.json
-rw-------. 1 root root 1679 Apr 21 11:36 client-key.pem
-rw-r--r--. 1 root root 1363 Apr 21 11:36 client.pem
-rw-r--r--. 1 root root 1062 Apr 20 10:36 etcd-peer.csr
-rw-r--r--. 1 root root 363 Apr 20 10:28 etcd-peer-csr.json
-rw-------. 1 root root 1675 Apr 20 10:36 etcd-peer-key.pem
-rw-r--r--. 1 root root 1428 Apr 20 10:36 etcd-peer.pem
-rw-r--r--. 1 root root 1115 Apr 22 15:23 kubelet.csr
-rw-r--r--. 1 root root 452 Apr 22 15:16 kubelet-csr.json
-rw-------. 1 root root 1679 Apr 22 15:23 kubelet-key.pem
-rw-r--r--. 1 root root 1468 Apr 22 15:23 kubelet.pem
拷贝证书
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kubelet.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kubelet-key.pem .
[root@hdss7-22 cert]# scp hdss7-200:/opt/certs/kubelet.pem .
[root@hdss7-22 cert]# scp hdss7-200:/opt/certs/kubelet-key.pem .
创建配置 — 分四步
都在 conf 目录下
1,set-cluster
# Step 1: define cluster "myk8s" in kubelet.kubeconfig — embeds the CA
# certificate and points at the apiserver VIP (10.4.7.10:7443, the
# reverse proxy in front of the masters).
kubectl config set-cluster myk8s \
--certificate-authority=/usr/local/kubernetes/server/bin/cert/ca.pem \
--embed-certs=true \
--server=https://10.4.7.10:7443 \
--kubeconfig=kubelet.kubeconfig
2,set-credentials
# Step 2: define user "k8s-node" — embeds the client certificate/key pair
# the kubelet uses to authenticate against the apiserver.
kubectl config set-credentials k8s-node \
--client-certificate=/usr/local/kubernetes/server/bin/cert/client.pem \
--client-key=/usr/local/kubernetes/server/bin/cert/client-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig
3,set-context
# Step 3: bind cluster "myk8s" and user "k8s-node" into a named context.
kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=k8s-node \
--kubeconfig=kubelet.kubeconfig
4,use-context
# Step 4: make the new context the active one for this kubeconfig.
kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
根据配置文件创建用户
[root@hdss7-21 conf]# vim k8s-node.yaml
# Grants the built-in ClusterRole "system:node" to the user "k8s-node"
# (the identity in the client cert embedded in kubelet.kubeconfig),
# authorizing kubelets that authenticate as this user.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: k8s-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: k8s-node
[root@hdss7-21 conf]# kubectl apply -f k8s-node.yaml
[root@hdss7-21 conf]# kubectl get clusterrolebinding k8s-node
NAME AGE
k8s-node 2m29s
[root@hdss7-21 conf]# kubectl get clusterrolebinding k8s-node -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRoleBinding","metadata":{"annotations":{},"name":"k8s-node"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"system:node"},"subjects":[{"apiGroup":"rbac.authorization.k8s.io","kind":"User","name":"k8s-node"}]}
creationTimestamp: "2022-04-22T08:53:45Z"
name: k8s-node
resourceVersion: "13910"
selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/k8s-node
uid: e2f6bd5e-2515-4bcb-90dc-9328d7aa62ae
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: k8s-node
拷贝 kubelet.kubeconfig 到 hdss7-22 上
scp hdss7-21:/usr/local/kubernetes/server/bin/conf/kubelet.kubeconfig .
准备 pause 基础镜像
为什么需要这个 pause 基础镜像?
原因:需要用一个 pause 基础镜像把这台机器的 pod 拉起来。因为 kubelet 是干活的节点,它帮我们调度 docker 引擎;采用边车模式,让 kubelet 控制一个小镜像,先于我们的业务容器起来,帮业务容器设置好 UTS、NET、IPC 等命名空间,让它先把命名空间占上,这样业务容器还没起来的时候,pod 的 ip 已经分配出来了。
运维主机 hdss7-200
# Pull the public pause image, re-tag it and push it into the private
# Harbor registry so all nodes can pull it locally.
# Tag by image NAME, not by a hard-coded image ID — the ID (f9d5de079539)
# is not stable and will differ for readers pulling at another time.
docker pull kubernetes/pause
docker tag kubernetes/pause:latest harbor.od.com/public/pause:latest
docker push harbor.od.com/public/pause:latest
启动脚本
[root@hdss7-21 conf]# vim /usr/local/kubernetes/server/bin/kubelet.sh
#!/bin/sh
# kubelet startup wrapper for hdss7-21, run by supervisord with
# directory=/usr/local/kubernetes/server/bin (hence the relative paths).
# NOTE: --hostname-override must match this node's hostname; other nodes
# change only that flag.
set -e
# Create the log and pod-data directories up front so the very first
# start does not fail before they exist.
mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
# exec replaces this shell with kubelet so supervisord's stop signal
# (stopsignal=QUIT) is delivered to kubelet itself, not the wrapper.
exec ./kubelet \
  --anonymous-auth=false \
  --cgroup-driver systemd \
  --cluster-dns 192.168.0.2 \
  --cluster-domain cluster.local \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --fail-swap-on="false" \
  --client-ca-file ./cert/ca.pem \
  --tls-cert-file ./cert/kubelet.pem \
  --tls-private-key-file ./cert/kubelet-key.pem \
  --hostname-override hdss7-21.host.com \
  --image-gc-high-threshold 20 \
  --image-gc-low-threshold 10 \
  --kubeconfig ./conf/kubelet.kubeconfig \
  --log-dir /data/logs/kubernetes/kube-kubelet \
  --pod-infra-container-image harbor.od.com/public/pause:latest \
  --root-dir /data/kubelet
################### 参数说明 ############
--anonymous-auth=false // 匿名登陆,这里不允许
--cgroup-driver systemd // 这里和 docker 的 daemon.json 保持一致
--cluster-dns 192.168.0.2
--cluster-domain cluster.local
--runtime-cgroups=/systemd/system.slice
--kubelet-cgroups=/systemd/system.slice
--fail-swap-on="false" // 正常是关闭 swap 分区的。这里不关,没有关闭 swap 分区正常启动,没有报错
--client-ca-file ./cert/ca.pem
--tls-cert-file ./cert/kubelet.pem
--tls-private-key-file ./cert/kubelet-key.pem
--hostname-override hdss7-21.host.com
--image-gc-high-threshold 20
--image-gc-low-threshold 10
--kubeconfig ./conf/kubelet.kubeconfig
--log-dir /data/logs/kubernetes/kube-kubelet
--pod-infra-container-image harbor.od.com/public/pause:latest
--root-dir /data/kubelet
mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
kubelet 集群各主机的启动脚本略有不同,其他节点注意修改:--hostname-override
[root@hdss7-22 conf]# vim /usr/local/kubernetes/server/bin/kubelet.sh
#!/bin/sh
# kubelet startup wrapper for hdss7-22, run by supervisord with
# directory=/usr/local/kubernetes/server/bin (hence the relative paths).
# Identical to the hdss7-21 script except for --hostname-override.
set -e
# Create the log and pod-data directories up front so the very first
# start does not fail before they exist.
mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
# exec replaces this shell with kubelet so supervisord's stop signal
# (stopsignal=QUIT) is delivered to kubelet itself, not the wrapper.
exec ./kubelet \
  --anonymous-auth=false \
  --cgroup-driver systemd \
  --cluster-dns 192.168.0.2 \
  --cluster-domain cluster.local \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --fail-swap-on="false" \
  --client-ca-file ./cert/ca.pem \
  --tls-cert-file ./cert/kubelet.pem \
  --tls-private-key-file ./cert/kubelet-key.pem \
  --hostname-override hdss7-22.host.com \
  --image-gc-high-threshold 20 \
  --image-gc-low-threshold 10 \
  --kubeconfig ./conf/kubelet.kubeconfig \
  --log-dir /data/logs/kubernetes/kube-kubelet \
  --pod-infra-container-image harbor.od.com/public/pause:latest \
  --root-dir /data/kubelet
[root@hdss7-21 conf]#vim /etc/supervisord.d/kube-kubelet.ini
; supervisord unit: kubelet on hdss7-21.
; Runs the wrapper script as root from the kubernetes bin directory,
; keeps it up (autorestart=true), and treats exit codes 0/2 as clean.
[program:kube-kubelet-7-21]
command=/usr/local/kubernetes/server/bin/kubelet.sh
numprocs=1
directory=/usr/local/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
; stderr is merged into stdout above; rotate at 64MB, keep 4 backups.
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
[root@hdss7-22 conf]#vim /etc/supervisord.d/kube-kubelet.ini
; supervisord unit: kubelet on hdss7-22 (mirror of the hdss7-21 unit).
; Runs the wrapper script as root from the kubernetes bin directory,
; keeps it up (autorestart=true), and treats exit codes 0/2 as clean.
[program:kube-kubelet-7-22]
command=/usr/local/kubernetes/server/bin/kubelet.sh
numprocs=1
directory=/usr/local/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
; stderr is merged into stdout above; rotate at 64MB, keep 4 backups.
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
检测 node 状态,可以看到已经起来。
[root@hdss7-21 bin]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready <none> 4m31s v1.15.12
hdss7-22.host.com Ready <none> 109s v1.15.12
打上标签
# Label both nodes with the (empty-valued) role labels; these show up in
# the ROLES column of `kubectl get nodes` as "master,node".
kubectl label node hdss7-21.host.com node-role.kubernetes.io/master=
kubectl label node hdss7-21.host.com node-role.kubernetes.io/node=
kubectl label node hdss7-22.host.com node-role.kubernetes.io/master=
kubectl label node hdss7-22.host.com node-role.kubernetes.io/node=
查看 node 标签
[root@hdss7-21 bin]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready master,node 11m v1.15.12
hdss7-22.host.com Ready master,node 8m45s v1.15.12
kubelet 安装完成