1. Environment Setup

(1) Pre-installation environment setup

Disable the firewall, SELinux, and swap; raise the system file-descriptor limits; tune kernel parameters; enable IPVS; and configure time synchronization (a sketch of the typical commands follows below).

$ source <(curl -sL https://gitee.com/jack_zang/kubernetes/raw/master/install/kubeadm_1.24/prepare_env.sh)

Note: set the hostnames yourself. DNS is used here to resolve hostnames, so hostname configuration is not covered again.
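
For reference, a prep script like this typically boils down to commands along the following lines (a minimal sketch assuming a CentOS/RHEL 8 host; the actual script may differ in detail):

$ systemctl disable --now firewalld
$ setenforce 0 && sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
$ swapoff -a && sed -i '/ swap / s/^/#/' /etc/fstab
$ cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
$ modprobe br_netfilter && sysctl --system
$ for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do modprobe $m; done
$ systemctl enable --now chronyd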

(2) Upgrade the kernel

$ source <(curl -sL https://gitee.com/jack_zang/kubernetes/raw/master/install/kubeadm_1.24/prepare_env_update_kernel.sh)

$ reboot
$ uname -r
6.0.0-1.el8.elrepo.x86_64
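
The kernel-upgrade script is essentially a mainline-kernel install from ELRepo; a sketch of the equivalent manual steps on CentOS/RHEL 8 (the release-RPM URL is the standard ELRepo location):

$ dnf install -y https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm
$ dnf --enablerepo=elrepo-kernel install -y kernel-ml
$ grubby --default-kernel    # confirm the new kernel is the boot default before rebooting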

(3) Install Kubernetes

$ source <(curl -sL https://gitee.com/jack_zang/kubernetes/raw/master/install/kubeadm_1.24/kubernetes_install.sh)
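
Roughly, that script configures a Kubernetes yum repository, installs kubeadm/kubelet/kubectl 1.24.6 and a containerd runtime, and enables kubelet. A sketch under those assumptions (the Aliyun mirror URL and pinned versions are assumptions based on the image repository used below):

$ cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
$ dnf install -y kubelet-1.24.6 kubeadm-1.24.6 kubectl-1.24.6
$ systemctl enable --now kubelet
# containerd must also be installed and configured (SystemdCgroup = true, pause image from
# registry.aliyuncs.com/google_containers), which the script is assumed to handle.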

2. Configuring a Single-Master Cluster with kubeadm

(1) Master node

// Create the init configuration file

$ cat > /etc/kubernetes/kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.24.6
controlPlaneEndpoint: "192.168.10.211:6443"
apiServer:
  certSANs:
  - 192.168.10.211
networking:
  # Must match the CALICO_IPV4POOL_CIDR set in the Calico manifest applied below.
  podSubnet: 10.244.0.0/16
imageRepository: "registry.aliyuncs.com/google_containers"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF

Reference for the kubeadm configuration file syntax: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 (kubeadm 1.24 still accepts the v1beta2 API, but with a deprecation warning; v1beta3 is the current version).
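
Before running init, you can compare the configuration against the defaults and pre-pull the control-plane images:

$ kubeadm config print init-defaults
$ kubeadm config images pull --config /etc/kubernetes/kubeadm-config.yaml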

// Initialize the cluster

$ kubeadm init --config /etc/kubernetes/kubeadm-config.yaml
...
  kubeadm join 192.168.10.211:6443 --token 7jnm3f.7o1zemvpehljtzz0 \
    --discovery-token-ca-cert-hash sha256:aae446a2bdac21d89856f1ebad543641fda1925b2758676e71a4550f57cf301a \
    --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.10.211:6443 --token 7jnm3f.7o1zemvpehljtzz0 \
    --discovery-token-ca-cert-hash sha256:aae446a2bdac21d89856f1ebad543641fda1925b2758676e71a4550f57cf301a 

$ mkdir -p $HOME/.kube
$ cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ chown $(id -u):$(id -g) $HOME/.kube/config
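
kubectl should now reach the API server; note that the control-plane node stays NotReady until a CNI plugin (Calico below) is installed:

$ kubectl cluster-info
$ kubectl get nodes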

// Install Calico

$ kubectl apply -f https://gitee.com/jack_zang/kubernetes/raw/master/install/kubeadm_1.24/calico-3.24.2.yaml

This manifest is essentially the official one; the only change is that the following commented-out section has been uncommented and set to the pod subnet configured above.

$ curl -s https://gitee.com/jack_zang/kubernetes/raw/master/install/kubeadm_1.24/calico-3.24.2.yaml |grep -A 1 CALICO_IPV4POOL_CIDR
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"

// Verify Calico

$ kubectl get pods -n kube-system
NAME                                          READY   STATUS    RESTARTS   AGE
calico-kube-controllers-6bb4597c4f-2ldpd      1/1     Running   0          4m2s
calico-node-ch4lz                             1/1     Running   0          4m2s
coredns-74586cf9b6-jztz7                      1/1     Running   0          39m
coredns-74586cf9b6-vrj89                      1/1     Running   0          39m
etcd-c810211.xiodi.cn                         1/1     Running   0          39m
kube-apiserver-c810211.xiodi.cn               1/1     Running   0          39m
kube-controller-manager-c810211.xiodi.cn      1/1     Running   0          39m
kube-proxy-2764t                              1/1     Running   0          39m
kube-scheduler-c810211.xiodi.cn               1/1     Running   0          39m

$ kubectl get nodes
NAME               STATUS   ROLES           AGE   VERSION
c810211.xiodi.cn   Ready    control-plane   40m   v1.24.6
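
As an extra check that Calico picked up the intended pod CIDR, you can inspect the default IPPool it creates (the object name default-ipv4-ippool is Calico's default):

$ kubectl get ippools.crd.projectcalico.org default-ipv4-ippool -o jsonpath='{.spec.cidr}{"\n"}'
# should print 10.244.0.0/16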

(2) Installing Calico with Helm (for reference only)

// Install Helm

$ wget https://get.helm.sh/helm-v3.6.0-linux-amd64.tar.gz
$ tar -zxvf helm-v3.6.0-linux-amd64.tar.gz
$ mv linux-amd64/helm  /usr/local/bin/
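
Quickly confirm the binary works:

$ helm version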

// Install the pod network component Calico

$ wget https://github.com/projectcalico/calico/releases/download/v3.20.0/tigera-operator-v3.20.0-1.tgz
$ helm show values tigera-operator-v3.20.0-1.tgz
## helm install calico tigera-operator-v3.20.0-1.tgz -f values.yaml
$ helm install calico tigera-operator-v3.20.0-1.tgz

// Verify Calico

$ kubectl get pods -n tigera-operator
NAME                               READY   STATUS    RESTARTS   AGE
tigera-operator-698876cbb5-drgw2   1/1     Running   0          56m

$ kubectl get pods -n calico-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-868b656ff4-z6vrm   1/1     Running   0          16m
calico-node-8cp5f                          1/1     Running   0          16m
calico-node-8dh76                          1/1     Running   0          16m
calico-node-zbnkq                          1/1     Running   0          16m
calico-typha-85768d896c-8m64x              1/1     Running   0          16m
calico-typha-85768d896c-c94rx              1/1     Running   0          16m
calico-typha-85768d896c-nwkp4              1/1     Running   0          16m

$ kubectl api-resources | grep calico
bgpconfigurations                              crd.projectcalico.org/v1               false        BGPConfiguration
bgppeers                                       crd.projectcalico.org/v1               false        BGPPeer
blockaffinities                                crd.projectcalico.org/v1               false        BlockAffinity
clusterinformations                            crd.projectcalico.org/v1               false        ClusterInformation
felixconfigurations                            crd.projectcalico.org/v1               false        FelixConfiguration
globalnetworkpolicies                          crd.projectcalico.org/v1               false        GlobalNetworkPolicy
globalnetworksets                              crd.projectcalico.org/v1               false        GlobalNetworkSet
hostendpoints                                  crd.projectcalico.org/v1               false        HostEndpoint
ipamblocks                                     crd.projectcalico.org/v1               false        IPAMBlock
ipamconfigs                                    crd.projectcalico.org/v1               false        IPAMConfig
ipamhandles                                    crd.projectcalico.org/v1               false        IPAMHandle
ippools                                        crd.projectcalico.org/v1               false        IPPool
kubecontrollersconfigurations                  crd.projectcalico.org/v1               false        KubeControllersConfiguration
networkpolicies                                crd.projectcalico.org/v1               true         NetworkPolicy
networksets                                    crd.projectcalico.org/v1               true         NetworkSet

// If the image cannot be pulled (run on every node)

$ docker pull aishangwei/tigera-operator:v1.20.0
$ docker tag aishangwei/tigera-operator:v1.20.0 quay.io/tigera/operator:v1.20.0
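
Note: on a 1.24 node that uses containerd directly (no Docker/cri-dockerd), the same workaround can be done with ctr in the k8s.io namespace, for example:

$ ctr -n k8s.io images pull docker.io/aishangwei/tigera-operator:v1.20.0
$ ctr -n k8s.io images tag docker.io/aishangwei/tigera-operator:v1.20.0 quay.io/tigera/operator:v1.20.0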

(3) Worker nodes

// Join the cluster

$ kubeadm join 192.168.10.211:6443 --token 7jnm3f.7o1zemvpehljtzz0 \
> --discovery-token-ca-cert-hash sha256:aae446a2bdac21d89856f1ebad543641fda1925b2758676e71a4550f57cf301a

// If you no longer remember the join command

$ kubeadm token create --print-join-command
kubeadm join 192.168.10.211:6443 --token fkfdxo.gqq9csrm8m11tvz8 --discovery-token-ca-cert-hash sha256:aae446a2bdac21d89856f1ebad543641fda1925b2758676e71a4550f57cf301a
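
Existing tokens and their expiry times can be listed with:

$ kubeadm token list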

// Verify the nodes

$ kubectl get pods -n kube-system -o wide
$ kubectl get nodes
NAME               STATUS   ROLES           AGE     VERSION
c810211.xiodi.cn   Ready    control-plane   58m     v1.24.6
c810212.xiodi.cn   Ready    <none>          4m46s   v1.24.6
c810213.xiodi.cn   Ready    <none>          4m35s   v1.24.6
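
Optionally, label the worker nodes so that ROLES no longer shows <none> (purely cosmetic):

$ kubectl label node c810212.xiodi.cn node-role.kubernetes.io/worker=
$ kubectl label node c810213.xiodi.cn node-role.kubernetes.io/worker=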

3. Extras

(1) Remove a node

$ kubectl drain c810213.xiodi.cn --delete-emptydir-data --force --ignore-daemonsets
$ kubectl delete node c810213.xiodi.cn
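
On the removed node itself, the kubeadm state can then be cleaned up before the machine is reused (a sketch; the CNI and ipvs cleanup lines are optional housekeeping):

$ kubeadm reset -f
$ rm -rf /etc/cni/net.d $HOME/.kube
$ ipvsadm --clear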