1.20版本k8s简单部署教程
准备两台机器

主机名    IP               角色
test160   192.168.100.122  master
test161   192.168.100.125  node

一、准备工作

1、两台机器关闭 selinux、防火墙、交换分区

2、添加 hosts

[root@test160 ~]# vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.100.125 test161

[root@test161 ~]# vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
185.199.110.153 kubernetes.github.io
192.168.100.122 test160

3、两台机器添加 kubelet、docker 的 yum 源

[root@test160 ~]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg

[root@test160 ~]# yum list | grep kube
cockpit-kubernetes.x86_64        195.12-1.el7.centos         extras
cri-tools.x86_64                 1.13.0-0                    kubernetes
kubeadm.x86_64                   1.20.5-0                    kubernetes
kubectl.x86_64                   1.20.5-0                    kubernetes
kubelet.x86_64                   1.20.5-0                    kubernetes
kubernetes.x86_64                1.5.2-0.7.git269f928.el7    extras
kubernetes-client.x86_64         1.5.2-0.7.git269f928.el7    extras
kubernetes-cni.x86_64            0.8.7-0                     kubernetes
kubernetes-master.x86_64         1.5.2-0.7.git269f928.el7    extras
kubernetes-node.x86_64           1.5.2-0.7.git269f928.el7    extras
rkt.x86_64                       1.27.0-1                    kubernetes
rsyslog-mmkubernetes.x86_64      8.24.0-57.el7_9             updates

[root@test160 k8s]# cat /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://download.docker.com/linux/centos/$releasever/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://download.docker.com/linux/centos/gpg

4、安装,各节点

yum install -y cri-tools kubeadm kubectl kubelet kubernetes-cni rkt.x86_64 docker-ce

cat > /etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
EOF
systemctl daemon-reload

5、启动

[root@test160 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@test160 ~]# systemctl start docker
[root@test160 ~]# systemctl start kubelet.service
[root@test160 ~]# systemctl enable kubelet.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

6、所有节点配置 k8s 内核参数

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
fs.may_detach_mounts=1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_probes=3
net.ipv4.tcp_keepalive_intvl=15
net.ipv4.tcp_max_tw_buckets=36000
net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_max_orphans=327680
net.ipv4.tcp_orphan_retries=3
net.ipv4.tcp_syncookies=1
net.ipv4.tcp_max_syn_backlog=16384
net.ipv4.ip_conntrack_max=65536
net.ipv4.tcp_max_syn_backlog=16384
net.ipv4.tcp_timestamps=0
net.core.somaxconn=16384
EOF
sysctl --system

7、在 master 初始化集群

[root@test160 ~]# cat new.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.100.122
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: test160
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - 192.168.100.122
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.100.122:6443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.4
networking:
  dnsDomain: cluster.local
  podSubnet: 172.168.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}

[root@test160 ~]# kubeadm init --config new.yaml --upload-certs
[init] Using Kubernetes version: v1.20.4
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.5. Latest validated version: 19.03
	[WARNING Hostname]: hostname "test160" could not be reached
	[WARNING Hostname]: hostname "test160": lookup test160 on 202.96.128.86:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local test160] and IPs [10.96.0.1 192.168.100.122]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost test160] and IPs [192.168.100.122 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost test160] and IPs [192.168.100.122 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 67.501960 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
9f2632e16029303fffc5f56e53c29b1d0dd55a444b12e2f49e93bd6f6e7361b4
[mark-control-plane] Marking the node test160 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node test160 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 7t2weq.bjbawausm0jaxury
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.100.122:6443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:4e1bd4a557221e73a0d3b7d168a2e02522475ec9a775602577cf4a1117fcfb9d \
    --control-plane --certificate-key 9f2632e16029303fffc5f56e53c29b1d0dd55a444b12e2f49e93bd6f6e7361b4

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.100.122:6443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:4e1bd4a557221e73a0d3b7d168a2e02522475ec9a775602577cf4a1117fcfb9d

[root@test160 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf

记录下 kubeadm join 这段,node 加入集群会用到。
配置环境变量,在 /root/.bashrc 加入这段 export KUBECONFIG=/etc/kubernetes/admin.conf

[root@test160 ~]# vim /root/.bashrc
export KUBECONFIG=/etc/kubernetes/admin.conf
[root@test160 ~]# source .bashrc
[root@test160 ~]# kubectl get nodes
NAME      STATUS     ROLES                  AGE     VERSION
test160   NotReady   control-plane,master   5m14s   v1.20.5

8、节点加入集群

[root@test161 ~]# kubeadm join 192.168.100.122:6443 --token 7t2weq.bjbawausm0jaxury \
> --discovery-token-ca-cert-hash sha256:4e1bd4a557221e73a0d3b7d168a2e02522475ec9a775602577cf4a1117fcfb9d
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.5. Latest validated version: 19.03
	[WARNING Hostname]: hostname "test161" could not be reached
	[WARNING Hostname]: hostname "test161": lookup test161 on 202.96.128.86:53: no such host
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
在 master 查看结果,加入成功了,虽然状态还是 NotReady 的,是因为网络插件还没有配置

[root@test160 ~]# kubectl get nodes
NAME      STATUS     ROLES                  AGE   VERSION
test160   NotReady   control-plane,master   84m   v1.20.5
test161   NotReady   <none>                 42s   v1.20.5

9、配置网络

下载脚本,不用改什么,直接安装即可

[root@test160 ~]# curl https://docs.projectcalico.org/manifests/canal.yaml -O
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  184k  100  184k    0     0  33609      0  0:00:05  0:00:05 --:--:-- 51158
[root@test160 ~]# ll
total 196
-rw-------. 1 root root   1289 Mar 10 00:41 anaconda-ks.cfg
-rw-r--r--. 1 root root 188519 Apr  2 15:28 canal.yaml
-rw-r--r--. 1 root root    979 Apr  2 13:56 new.yaml

安装,现在去看各个节点都 ready 了

[root@test160 ~]# kubectl apply -f canal.yaml
configmap/canal-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/canal-flannel created
clusterrolebinding.rbac.authorization.k8s.io/canal-calico created
daemonset.apps/canal created
serviceaccount/canal created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created

[root@test160 ~]# kubectl get nodes
NAME      STATUS   ROLES                  AGE    VERSION
test160   Ready    control-plane,master   107m   v1.20.5
test161   Ready    <none>                 24m    v1.20.5