{"id":549,"title":"\u4e3b\u8282\u70b9\u53canode\u8282\u70b9\u914d\u7f6e","good":0,"bad":0,"hit":1654,"created_at":"2023-01-02 23:26:05","content":"

### Initialize the Master Node

**[If the master node's IP has changed, run `kubeadm reset` first, then perform the steps below.]**


```
# Initialize the master node (pasting this directly may fail; copy it into a
# plain-text file first to filter out any invisible characters)
kubeadm init \
--apiserver-advertise-address=192.168.14.128 \
--control-plane-endpoint=cluster-endpoint \
--image-repository registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images \
--kubernetes-version v1.20.9 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=192.168.0.0/16
```


Change `--apiserver-advertise-address` to your own master node's IP and `--control-plane-endpoint` to your master node's name (it must match the entry in /etc/hosts; see the sketch below). The image repository can be left as-is if it still works. The three network ranges involved (the node network, `--service-cidr`, and `--pod-network-cidr`) must all be distinct and must not overlap.
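For reference, a minimal sketch of the /etc/hosts entry this refers to, using the example IP and name from the command above (substitute your own values):

```
# /etc/hosts on every node: resolve the control-plane endpoint name
# to the master node's IP
192.168.14.128  cluster-endpoint
```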


If you see the output below, initialization succeeded. Follow the instructions it prints and save the whole output: it contains the kubeconfig setup commands, the join commands for additional control-plane and worker nodes, and a reminder to install a network plugin:


```
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join cluster-endpoint:6443 --token rxvjig.yplqam3qf3tq6s99 \
    --discovery-token-ca-cert-hash sha256:32f8bfd1e9ad11270e2d9d2d6284daef5a09476676ee27329b64d973a0f493e0 \
    --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join cluster-endpoint:6443 --token rxvjig.yplqam3qf3tq6s99 \
    --discovery-token-ca-cert-hash sha256:32f8bfd1e9ad11270e2d9d2d6284daef5a09476676ee27329b64d973a0f493e0
```


**After running the kubeconfig setup commands, check the cluster; the master node is already visible:**

```
[root@cluster-endpoint ~]# kubectl get nodes
NAME               STATUS     ROLES                  AGE   VERSION
cluster-endpoint   NotReady   control-plane,master   15m   v1.20.9
```


```
# List all nodes in the cluster
kubectl get nodes

# Create resources in the cluster from a config file
kubectl apply -f xxxx.yaml

# See which applications the cluster is running
docker ps   ===   kubectl get pods -A
# A running application is called a container in Docker and a Pod in k8s
kubectl get pods -A
```
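As a sketch of what such a `xxxx.yaml` might be (the deployment name `hello-nginx` is purely illustrative), you can let kubectl generate a minimal manifest and apply it:

```
# Hypothetical example: generate a minimal Deployment manifest,
# create it in the cluster, and watch the resulting Pod appear
kubectl create deployment hello-nginx --image=nginx --dry-run=client -o yaml > xxxx.yaml
kubectl apply -f xxxx.yaml
kubectl get pods -A | grep hello-nginx
```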


### 3. Install the Network Plugin

The network plugin used here is Calico (see the Calico official site for details).

```
curl https://docs.projectcalico.org/manifests/calico.yaml -O

kubectl apply -f calico.yaml
```

If you get the error:

```
error: unable to recognize "calico.yaml": no matches for kind "PodDisruptionBudget" in version "policy/v1"
```

it is a version problem; re-run the download with the v3.18 manifest instead:

```
curl https://docs.projectcalico.org/v3.18/manifests/calico.yaml -O
```

Or download the attached calico.rar directly, then run:

```
kubectl apply -f calico.yaml
```
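While Calico starts up, you can watch its pods (and coredns) come up; the `-w` watch flag is stock kubectl:

```
# Watch calico-* and coredns pods until they reach Running
kubectl get pods -A -w
```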

Check again; the node is now Ready:

```
[root@cluster-endpoint ~]# kubectl get nodes
NAME               STATUS   ROLES                  AGE   VERSION
cluster-endpoint   Ready    control-plane,master   23m   v1.20.9
```


**Extension: the value below in calico.yaml is the default for the `pod-network-cidr` used at master initialization; if you change one, you must change both places:**

```
[root@cluster-endpoint ~]# cat calico.yaml | grep 192.168
            #   value: "192.168.0.0/16"
```
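As a sketch of what changing both places looks like: suppose you had passed `--pod-network-cidr=10.244.0.0/16` to kubeadm init (a hypothetical value); you would then uncomment `CALICO_IPV4POOL_CIDR` in calico.yaml and set it to the same CIDR, for example:

```
# Hypothetical edit, assuming the stock manifest's commented-out lines:
# uncomment CALICO_IPV4POOL_CIDR and align it with --pod-network-cidr
sed -i 's|# - name: CALICO_IPV4POOL_CIDR|- name: CALICO_IPV4POOL_CIDR|' calico.yaml
sed -i 's|#   value: "192.168.0.0/16"|  value: "10.244.0.0/16"|' calico.yaml
```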


**One final check; everything is running:**

```
[root@cluster-endpoint ~]# kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-56c7cdffc6-6z49z   1/1     Running   0          10m
kube-system   calico-node-q446p                          1/1     Running   0          10m
kube-system   coredns-5897cd56c4-6mc2x                   1/1     Running   0          32m
kube-system   coredns-5897cd56c4-jhnq5                   1/1     Running   0          32m
kube-system   etcd-cluster-endpoint                      1/1     Running   0          33m
kube-system   kube-apiserver-cluster-endpoint            1/1     Running   0          33m
kube-system   kube-controller-manager-cluster-endpoint   1/1     Running   0          33m
kube-system   kube-proxy-8shzk                           1/1     Running   0          32m
kube-system   kube-scheduler-cluster-endpoint            1/1     Running   0          33m

[root@cluster-endpoint ~]# kubectl get nodes
NAME               STATUS   ROLES                  AGE   VERSION
cluster-endpoint   Ready    control-plane,master   33m   v1.20.9
```

### 4. Join Worker Nodes
```
kubeadm join cluster-endpoint:6443 --token rxvjig.yplqam3qf3tq6s99 \
--discovery-token-ca-cert-hash sha256:32f8bfd1e9ad11270e2d9d2d6284daef5a09476676ee27329b64d973a0f493e0
```

The token in the command above is valid for 24 hours; after it expires, generate a new join command with:

```
kubeadm token create --print-join-command
```
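To see which tokens currently exist and when they expire, kubeadm's standard `token list` subcommand can be used:

```
# List existing bootstrap tokens with their TTL and expiration
kubeadm token list
```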

If a newly joined node reports `The connection to the server localhost:8080 was refused - did you specify the right host or port?` when you run kubectl, it is because `kubectl` needs the `kubernetes-admin` credentials to run. Copy `/etc/kubernetes/admin.conf` from the master node into the worker node's home directory, then point the environment variable at it:

```
# On the master node: copy admin.conf to the worker node
sudo scp /etc/kubernetes/admin.conf root@<node-ip>:~

# On the worker node: configure the environment variable
export KUBECONFIG=$HOME/admin.conf
```
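Note that `export` only affects the current shell session; one common way to make the setting persistent (an addition, not part of the original steps) is to append it to the shell profile:

```
# Optional: persist KUBECONFIG across logins
echo 'export KUBECONFIG=$HOME/admin.conf' >> ~/.bashrc
source ~/.bashrc
```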

**Wait a moment and check again; once the status is Ready, the join succeeded:**

```
[root@cluster-endpoint ~]# kubectl get nodes
NAME               STATUS   ROLES                  AGE   VERSION
192.168.14.129     Ready    <none>                 14m   v1.20.9
192.168.14.130     Ready    <none>                 52s   v1.20.9
cluster-endpoint   Ready    control-plane,master   55m   v1.20.9
```


***For a high-availability deployment, this is also the step where you add more masters; simply use the control-plane join command (the one with `--control-plane`) instead.***


#### 5. Deploy the Dashboard (v2.3.1 would not start, so it was changed to v2.5.1)

The official Kubernetes web UI:

https://github.com/kubernetes/dashboard

```
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.1/aio/deploy/recommended.yaml
```

If the file above cannot be downloaded, use the attached recommended.rar instead (its Docker image version has already been changed to 2.5.1), then run:

```
kubectl apply -f recommended.yaml
```
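Either way, you can confirm the dashboard came up; it runs in its own namespace (standard for the upstream manifest):

```
# The dashboard pods should reach Running in the kubernetes-dashboard namespace
kubectl get pods -n kubernetes-dashboard
```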

#### 2. Set the Access Port
```
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
```

Find `type: ClusterIP` in the file and change it to `type: NodePort`.
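If you prefer a non-interactive command, a sketch using `kubectl patch` (a standard kubectl subcommand) makes the same change:

```
# Equivalent non-interactive edit: switch the Service type to NodePort
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
  -p '{"spec": {"type": "NodePort"}}'
```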


```
kubectl get svc -A | grep kubernetes-dashboard
## Find the NodePort and allow it in your security group
```


Access: https://\<any-cluster-node-IP\>:\<port\>, for example:

https://192.168.14.128:31268/#/login


#### 3. Create an Access Account
```
# Create an access account; prepare a yaml file: vi dash.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
```
```
kubectl apply -f dash.yaml
```


The same file is also attached: dash.rar


#### 4. Get the Access Token
```
# Get the access token
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
```
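An alternative sketch that reads the same token via `kubectl describe` (this assumes the secret is auto-named with the `admin-user-token-` prefix, as it is on v1.20):

```
# Look up the auto-generated secret name, then print it, token included
kubectl -n kubernetes-dashboard describe secret \
  $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
```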


Log in with the token you obtained.
