Preface
Lately the situation between China and India has been heating up. As a patriotic young man I feel some anger, yet at times also extreme pride. I can't tell whether Chinese diplomacy is being strong or weak, but shouldn't there at least be a clear stance? What is this? All we do is lodge protests, or else stage a military exercise. What good does that do?
My own guess is that the state has its own plans, much like a lion facing a mad dog: why bother? Amid all the back-and-forth between China and India, I can't tell whether we are about to flash real power or simply squander our early promise.
And is that flash of power the carrier's electromagnetic catapult, the nuclear submarines, or the drones...
Getting started
I assume everyone here knows Docker, and most of you have played with k8s too!
I ran into a few problems while building a Kubernetes cluster. There are plenty of setup guides online to follow, but the cluster only counts as ready once the network connectivity requirements below are all satisfied.
The requirements are as follows:
![Network connectivity requirements](1.png)
The k8s architecture is shown below:
![k8s architecture](2.png)
Versions and machine information:
![Versions and machine information](3.png)
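Before initializing anything, it is worth confirming that the nodes can actually reach each other. A minimal smoke test, assuming the master sits at 10.12.0.18 and the etcd host at 10.12.0.22 as in the machine table above (substitute your own node IPs):

```bash
# Run from each node: every other node should answer
for ip in 10.12.0.18 10.12.0.22; do
    ping -c 3 "$ip" >/dev/null && echo "$ip reachable" || echo "$ip UNREACHABLE"
done
```

iperf (installed a few steps below) can additionally measure node-to-node throughput: `iperf -s` on one host, `iperf -c <host>` on another.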
Node initialization
Switch yum to the Aliyun mirror:

```bash
# Back up the stock repo file, then fetch the Aliyun CentOS 7 repo
mv -f /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bk
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
```
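Optionally rebuild the yum metadata cache so the new mirror takes effect right away (a routine follow-up, not strictly required):

```bash
yum clean all   # drop metadata cached from the old repositories
yum makecache   # pre-fetch metadata from the Aliyun mirror
```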
Set up the bridge parameters
```bash
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
EOF
sudo sysctl --system
```
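To verify the settings took hold (these sysctl keys only exist while the bridge netfilter module is loaded; run `modprobe br_netfilter` first if they are missing):

```bash
# Each should print "... = 1"
sysctl net.bridge.bridge-nf-call-iptables
sysctl net.bridge.bridge-nf-call-ip6tables
```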
```bash
# Disable SELinux (fully effective after the reboot below)
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

# Stop and disable the firewall
sudo systemctl disable firewalld.service
sudo systemctl stop firewalld.service

# Optional: install iptables-services, flush the rules, and disable the service
sudo yum install -y iptables-services
iptables -F
sudo systemctl disable iptables.service
sudo systemctl stop iptables.service

# Base packages: etcd, flannel, docker, plus assorted tools
sudo yum install -y vim wget curl screen git etcd ebtables flannel
sudo yum install -y socat net-tools.x86_64 iperf bridge-utils.x86_64
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum install -y libdevmapper* docker
```
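A quick check that the security services really are off (standard CentOS tooling; SELinux reports Disabled only after the reboot):

```bash
getenforce                      # "Disabled" once the node has rebooted
systemctl is-active firewalld   # should print "inactive"
```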
```bash
reboot
```

After the machine comes back up, continue with the steps below.
```bash
cat <<EOF > /etc/sysconfig/docker
OPTIONS="-H unix:///var/run/docker.sock -H tcp://127.0.0.1:2375 --storage-driver=overlay --exec-opt native.cgroupdriver=cgroupfs --graph=/localdisk/docker/graph --insecure-registry=gcr.io --insecure-registry=quay.io --insecure-registry=registry.cn-hangzhou.aliyuncs.com --registry-mirror=http://138f94c6.m.daocloud.io"
EOF

systemctl start docker
systemctl status docker -l
```
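`docker info` can confirm the storage driver and registry settings were picked up (the output labels vary a little across Docker versions):

```bash
docker info 2>/dev/null | grep -iE 'storage driver|registry'
# Expect: Storage Driver: overlay, plus the mirror/insecure registries above
```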
kubeadm and Calico need the images below (the control-plane and kube-dns tags match the kubernetesVersion v1.7.2 used later). Every node must be able to run them:

```
quay.io/calico/node:v1.3.0
quay.io/calico/cni:v1.9.1
quay.io/calico/kube-policy-controller:v0.6.0
gcr.io/google_containers/pause-amd64:3.0
gcr.io/google_containers/kube-proxy-amd64:v1.7.2
gcr.io/google_containers/kube-apiserver-amd64:v1.7.2
gcr.io/google_containers/kube-controller-manager-amd64:v1.7.2
gcr.io/google_containers/kube-scheduler-amd64:v1.7.2
gcr.io/google_containers/etcd-amd64:3.0.17
gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.4
gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.4
gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.4
```
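A sketch for pre-pulling all of them in one go, assuming the daemon can reach gcr.io/quay.io directly or through the mirror configured above:

```bash
for img in \
    quay.io/calico/node:v1.3.0 \
    quay.io/calico/cni:v1.9.1 \
    quay.io/calico/kube-policy-controller:v0.6.0 \
    gcr.io/google_containers/pause-amd64:3.0 \
    gcr.io/google_containers/kube-proxy-amd64:v1.7.2 \
    gcr.io/google_containers/kube-apiserver-amd64:v1.7.2 \
    gcr.io/google_containers/kube-controller-manager-amd64:v1.7.2 \
    gcr.io/google_containers/kube-scheduler-amd64:v1.7.2 \
    gcr.io/google_containers/etcd-amd64:3.0.17 \
    gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.4 \
    gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.4 \
    gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.4; do
    docker pull "$img" || echo "failed to pull $img (mirror or proxy needed?)"
done
```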
Start a standalone etcd (here on 10.12.0.22) in a screen session, then confirm it is healthy:

```bash
screen etcd -name="EtcdServer" \
    -initial-advertise-peer-urls=http://10.12.0.22:2380 \
    -listen-peer-urls=http://0.0.0.0:2380 \
    -listen-client-urls=http://10.12.0.22:2379 \
    -advertise-client-urls http://10.12.0.22:2379 \
    -data-dir /var/lib/etcd/default.etcd

etcdctl --endpoint=http://10.12.0.22:2379 member list
etcdctl --endpoint=http://10.12.0.22:2379 cluster-health
```
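A screen session disappears with the login shell; for anything longer-lived, a systemd unit is sturdier. A minimal sketch, assuming the etcd binary from the yum package lives at /usr/bin/etcd (the unit name etcd-standalone is my own choice):

```bash
cat <<'EOF' > /etc/systemd/system/etcd-standalone.service
[Unit]
Description=Standalone etcd for the k8s cluster

[Service]
ExecStart=/usr/bin/etcd -name=EtcdServer \
    -initial-advertise-peer-urls=http://10.12.0.22:2380 \
    -listen-peer-urls=http://0.0.0.0:2380 \
    -listen-client-urls=http://10.12.0.22:2379 \
    -advertise-client-urls=http://10.12.0.22:2379 \
    -data-dir=/var/lib/etcd/default.etcd
Restart=always

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable etcd-standalone
systemctl start etcd-standalone
```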
```bash
cat <<EOF > kubeadm_config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: 10.12.0.18
  bindPort: 6443
etcd:
  endpoints:
  - http://10.12.0.22:2379
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/16
  podSubnet: 10.68.0.0/16
kubernetesVersion: v1.7.2
#token: <string>
#tokenTTL: 0
EOF

kubeadm init --config kubeadm_config.yaml
```
Once init succeeds, point kubectl at the new cluster and check component health:

```bash
rm -rf $HOME/.kube
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get cs -o wide --show-labels
kubectl get nodes -o wide --show-labels
```
On each worker node, start the services and join the cluster with the token printed by kubeadm init (redacted here):

```bash
systemctl start docker
systemctl start kubelet
kubeadm join --token *{6}.*{16} 10.12.0.18:6443 --skip-preflight-checks
```
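If the token was not written down, it can usually be recovered on the master; the `kubeadm token` subcommand exists in this generation of kubeadm, but its exact flags vary by version, so treat this as a hint rather than gospel:

```bash
kubeadm token list     # show current bootstrap tokens
kubeadm token create   # mint a fresh one if the old token expired
```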
Back on the master, watch the nodes come up:

```bash
kubectl get nodes -o wide
watch kubectl get all --all-namespaces -o wide
```
Save the following as calico.yaml:

```yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  etcd_endpoints: "http://10.12.0.22:2379"
  calico_backend: "bird"
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.1.0",
      "type": "calico",
      "etcd_endpoints": "__ETCD_ENDPOINTS__",
      "log_level": "info",
      "ipam": {
        "type": "calico-ipam"
      },
      "policy": {
        "type": "k8s",
        "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
      },
      "kubernetes": {
        "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
      }
    }
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      serviceAccountName: calico-cni-plugin
      containers:
      - name: calico-node
        image: quay.io/calico/node:v1.3.0
        env:
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: etcd_endpoints
        - name: CALICO_NETWORKING_BACKEND
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: calico_backend
        - name: CALICO_DISABLE_FILE_LOGGING
          value: "true"
        - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
          value: "ACCEPT"
        - name: CALICO_IPV4POOL_CIDR
          value: "10.68.0.0/16"
        - name: CALICO_IPV4POOL_IPIP
          value: "always"
        - name: FELIX_IPV6SUPPORT
          value: "false"
        - name: FELIX_LOGSEVERITYSCREEN
          value: "info"
        - name: IP
          value: ""
        securityContext:
          privileged: true
        resources:
          requests:
            cpu: 250m
        volumeMounts:
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
        - mountPath: /var/run/calico
          name: var-run-calico
          readOnly: false
      - name: install-cni
        image: quay.io/calico/cni:v1.9.1
        command: ["/install-cni.sh"]
        env:
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: etcd_endpoints
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: cni_network_config
        volumeMounts:
        - mountPath: /host/opt/cni/bin
          name: cni-bin-dir
        - mountPath: /host/etc/cni/net.d
          name: cni-net-dir
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: var-run-calico
        hostPath:
          path: /var/run/calico
      - name: cni-bin-dir
        hostPath:
          path: /opt/cni/bin
      - name: cni-net-dir
        hostPath:
          path: /etc/cni/net.d
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      serviceAccountName: calico-policy-controller
      containers:
      - name: calico-policy-controller
        image: quay.io/calico/kube-policy-controller:v0.6.0
        env:
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: etcd_endpoints
        - name: K8S_API
          value: "https://kubernetes.default:443"
        - name: CONFIGURE_ETC_HOSTS
          value: "true"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-cni-plugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-cni-plugin
subjects:
- kind: ServiceAccount
  name: calico-cni-plugin
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cni-plugin
  namespace: kube-system
rules:
- apiGroups: [""]
  resources:
  - pods
  - nodes
  verbs:
  - get
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-cni-plugin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-policy-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-policy-controller
subjects:
- kind: ServiceAccount
  name: calico-policy-controller
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-policy-controller
  namespace: kube-system
rules:
- apiGroups:
  - ""
  - extensions
  resources:
  - pods
  - namespaces
  - networkpolicies
  verbs:
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-policy-controller
  namespace: kube-system
```
```bash
kubectl apply -f calico.yaml
kubectl get all --all-namespaces
```
```
NAMESPACE     NAME                                               READY   STATUS    RESTARTS   AGE
kube-system   po/calico-node-2gqf2                               2/2     Running   0          19h
kube-system   po/calico-node-fg8gh                               2/2     Running   0          19h
kube-system   po/calico-node-ksmrn                               2/2     Running   0          19h
kube-system   po/calico-policy-controller-1727037546-zp4lp       1/1     Running   0          19h
kube-system   po/etcd-izuf6fb3vrfqnwbct6ivgwz                    1/1     Running   0          19h
kube-system   po/kube-apiserver-izuf6fb3vrfqnwbct6ivgwz          1/1     Running   0          19h
kube-system   po/kube-controller-manager-izuf6fb3vrfqnwbct6ivgwz 1/1     Running   0          19h
kube-system   po/kube-dns-2425271678-3t4g6                       3/3     Running   0          19h
kube-system   po/kube-proxy-6fg1l                                1/1     Running   0          19h
kube-system   po/kube-proxy-fdbt2                                1/1     Running   0          19h
kube-system   po/kube-proxy-lgf3z                                1/1     Running   0          19h
kube-system   po/kube-scheduler-izuf6fb3vrfqnwbct6ivgwz          1/1     Running   0          19h

NAMESPACE     NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
default       svc/kubernetes   10.96.0.1    <none>        443/TCP         19h
kube-system   svc/kube-dns     10.96.0.10   <none>        53/UDP,53/TCP   19h

NAMESPACE     NAME                              DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
kube-system   deploy/calico-policy-controller   1         1         1            1           19h
kube-system   deploy/kube-dns                   1         1         1            1           19h

NAMESPACE     NAME                                     DESIRED   CURRENT   READY   AGE
kube-system   rs/calico-policy-controller-1727037546   1         1         1       19h
kube-system   rs/kube-dns-2425271678                   1         1         1       19h
```
Deploy the dashboard, then Heapster with InfluxDB:

```bash
wget https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml
kubectl create -f kubernetes-dashboard.yaml

wget https://github.com/kubernetes/heapster/archive/v1.4.0.tar.gz
tar -zxvf v1.4.0.tar.gz
cd heapster-1.4.0/deploy/kube-config/influxdb
kubectl create -f ./
```
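Both should show up in kube-system within a minute or two:

```bash
kubectl get pods -n kube-system | grep -E 'dashboard|heapster|influxdb'
```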
Other useful commands
```bash
# Force-delete a stuck pod
kubectl delete pod <podname> --namespace=<namespace> --grace-period=0 --force

# Completely reset a node (undoes kubeadm init/join)
kubeadm reset
systemctl stop kubelet
docker ps -aq | xargs docker rm -fv
find /var/lib/kubelet | xargs -n 1 findmnt -n -t tmpfs -o TARGET -T | uniq | xargs -r umount -v
rm -rf /var/lib/kubelet /etc/kubernetes/ /var/lib/etcd
systemctl start kubelet
```
Expose the API server and dashboard through kubectl proxy:

```bash
kubectl proxy --address=0.0.0.0 --port=8001 --accept-hosts='^.*'
# or bind to a specific interface and restrict the allowed hosts:
kubectl proxy --port=8011 --address=192.168.61.100 --accept-hosts='^192\.168\.61\.*'
```

Then open http://<node-ip>:8001/ui in a browser.
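A quick sanity check from the proxy host itself; the /ui path is the 1.7-era dashboard redirect, so adjust it if your version serves a different path:

```bash
curl -sI http://127.0.0.1:8001/ui | head -n 3   # expect an HTTP 30x redirect
```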
Call the API directly using the default service-account token:

```bash
APISERVER=$(kubectl config view | grep server | cut -f 2- -d ":" | tr -d " ")
TOKEN=$(kubectl describe secret $(kubectl get secrets | grep default | cut -f1 -d ' ') | grep -E '^token' | cut -f2 -d':' | tr -d '\t')
curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure
```
To let ordinary workloads schedule onto the master, remove its taint:

```bash
kubectl taint nodes --all node-role.kubernetes.io/master-
# or, on clusters that use the older "dedicated" taint:
kubectl taint nodes --all dedicated-
```
`kubectl describe node` on the master now shows no taints:

```
Name:        izuf6fb3vrfqnwbct6ivgwz
Role:
Labels:      beta.kubernetes.io/arch=amd64
             beta.kubernetes.io/os=linux
             kubernetes.io/hostname=izuf6fb3vrfqnwbct6ivgwz
             node-role.kubernetes.io/master=
Annotations: node.alpha.kubernetes.io/ttl=0
             volumes.kubernetes.io/controller-managed-attach-detach=true
Taints:      <none>
```
Summary: everything above has passed testing, but there is still one mistake hiding in it. Can those of you who read the whole document spot it?