Kubernetes Setup

Kubernetes deployment methods

Binary installation

All components are installed by hand from binary packages, yum, and similar tools.

Installation with the kubeadm tool

The container runtime and the kubelet are installed from the distribution's package repositories (yum, apt); kubeadm then brings up the remaining Kubernetes components as static Pods (they are not managed by any controller, and the kubelet restarts them automatically if they go down).
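A quick way to see this in practice once a control plane is up (a verification sketch, assuming the default kubeadm paths):

# Static Pod manifests written by kubeadm on a control-plane node
ls /etc/kubernetes/manifests/
# expected: etcd.yaml  kube-apiserver.yaml  kube-controller-manager.yaml  kube-scheduler.yaml

# The kubelet mirrors them into the API server as read-only "mirror Pods"
kubectl -n kube-system get pods | grep -E 'etcd|apiserver|controller-manager|scheduler'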

Deploying with kubeadm

Preparation

Hostname and IP configuration

hostnamectl set-hostname k8s-master-01
hostnamectl set-hostname k8s-node-01
hostnamectl set-hostname k8s-node-02

cat >> /etc/hosts << "EOF"
10.6.6.1 master01.k8s.cn m1 m1.k8s.cn
10.6.6.2 master02.k8s.cn m2 m2.k8s.cn
10.6.6.4 node01.k8s.cn n1 n1.k8s.cn
10.6.6.5 node02.k8s.cn n2 n2.k8s.cn
EOF

Disable swap

# Turn off swap for the running system
swapoff -a

# Remove the swap entry from /etc/fstab so it stays off after reboot (back the file up first)
cp /etc/fstab /etc/fstab.bak
sed -i "/swap/d" /etc/fstab

Disable the firewall

systemctl disable --now ufw

Increase the open-file limit

cat > /etc/security/limits.d/k8s.conf << "EOF"
* soft nofile 65535
* hard nofile 131070
EOF
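The new limits apply to fresh login sessions; a quick check after re-login (verification sketch):

ulimit -Sn   # expect 65535
ulimit -Hn   # expect 131070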

Kernel module configuration

cat << EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

modprobe br_netfilter
modprobe overlay

lsmod | grep br_netfilter
lsmod | grep overlay

Bridge filtering and kernel IP forwarding

cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl --system
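To confirm the settings took effect (verification sketch):

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
# all three keys should print "= 1"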

Synchronize cluster time

chrony server
apt install -y chrony

mv /etc/chrony/chrony.conf /etc/chrony/chrony.conf.bak

cat > /etc/chrony/chrony.conf << EOF
server ntp1.aliyun.com iburst minpoll 4 maxpoll 10
server ntp2.aliyun.com iburst minpoll 4 maxpoll 10
server ntp3.aliyun.com iburst minpoll 4 maxpoll 10
server ntp4.aliyun.com iburst minpoll 4 maxpoll 10
server ntp5.aliyun.com iburst minpoll 4 maxpoll 10
server ntp6.aliyun.com iburst minpoll 4 maxpoll 10
server ntp7.aliyun.com iburst minpoll 4 maxpoll 10
server 10.6.6.1 iburst minpoll 4 maxpoll 10
makestep 10 3
rtcsync
allow 0.0.0.0/0
local stratum 10
keyfile /etc/chrony/chrony.keys
driftfile /var/lib/chrony/chrony.drift
logdir /var/log/chrony
stratumweight 0.05
noclientlog
logchange 0.5
maxdistance 600.0
EOF

systemctl restart chrony
systemctl enable chrony
timedatectl set-timezone Asia/Shanghai
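To confirm the server is syncing upstream and serving clients (verification sketch):

chronyc sources -v   # upstream NTP sources and their reachability
chronyc tracking     # current offset and stratum
chronyc clients      # nodes that have synced against this server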
chrony client
apt install -y chrony

cat > /etc/chrony/chrony.conf << EOF
server 10.6.6.1 iburst minpoll 4 maxpoll 10
makestep 10 3
rtcsync
allow 0.0.0.0/0
local stratum 10
keyfile /etc/chrony/chrony.keys
driftfile /var/lib/chrony/chrony.drift
logdir /var/log/chrony
stratumweight 0.05
noclientlog
logchange 0.5
maxdistance 600.0
EOF

systemctl restart chrony
systemctl enable chrony
timedatectl set-timezone Asia/Shanghai
apt install -y ntpdate
timedatectl set-ntp yes
ntpdate 10.6.6.1
echo "*/5 * * * * ntpdate 10.6.6.1"

Install IPVS

apt install -y ipvsadm ipset
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF

modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack
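To confirm the modules are loaded, and later that kube-proxy really runs in IPVS mode (verification sketch):

lsmod | grep -e ip_vs -e nf_conntrack

# Once kube-proxy is running in ipvs mode, virtual servers show up here:
ipvsadm -Ln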

containerd configuration

tar -xf cri-containerd-1.7.23-linux-amd64.tar.gz -C /

mkdir /etc/containerd && cd /etc/containerd
containerd config default > config.toml

# Point the pause (sandbox) image at the private registry
sed -i 's/registry.k8s.io\/pause:3.8/crpi-lphhnxw99vkg46ps.cn-chengdu.personal.cr.aliyuncs.com\/ssddffaa_kubernetes\/pause:3.10/' /etc/containerd/config.toml

# Switch containerd's runc runtime to the systemd cgroup driver
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml

systemctl daemon-reload && systemctl restart containerd
systemctl enable --now containerd
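A quick check that containerd is up and its CRI endpoint answers (verification sketch; crictl ships in the cri-containerd bundle extracted above):

systemctl is-active containerd
ctr version
crictl --runtime-endpoint unix:///var/run/containerd/containerd.sock info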

kubeadm via the Aliyun apt repository

apt-get update && apt-get install -y apt-transport-https
mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.29/deb/Release.key |
    gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.29/deb/ /" |
    tee /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
systemctl enable kubelet --now
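Confirm the versions that were actually installed and pinned (verification sketch):

kubeadm version -o short
kubelet --version
kubectl version --client
apt-mark showhold   # should list kubelet kubeadm kubectl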

kubeadm init

Command line

kubeadm init \
--image-repository=harbor.k8s.cn/k8s \
--kubernetes-version=v1.31.2 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16

# Alternative image repository:
# --image-repository=registry.cn-hangzhou.aliyuncs.com/k8sos   # works for older releases, not for newer ones
# Optional: --apiserver-advertise-address=192.168.71.12        # for an HA deployment, point this at the VIP
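Before running init, the control-plane images can be pre-pulled against the same repository (a sketch using standard kubeadm subcommands; adjust the repository to whichever one is used above):

kubeadm config images list --kubernetes-version v1.31.2 --image-repository harbor.k8s.cn/k8s
kubeadm config images pull --kubernetes-version v1.31.2 --image-repository harbor.k8s.cn/k8s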

kubeadm.yaml configuration file

kubeadm config print init-defaults > kubeadm.yaml

apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.18.104.158   # control-plane node address
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: k8s-master-01                # node name
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
controlPlaneEndpoint: 10.10.0.10:7443   # control-plane endpoint (VIP:port for HA)
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: crpi-lphhnxw99vkg46ps.cn-chengdu.personal.cr.aliyuncs.com/ssddffaa_kubernetes   # image repository
kind: ClusterConfiguration
kubernetesVersion: 1.31.2               # Kubernetes version
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16              # added line: Pod network CIDR
proxy: {}
scheduler: {}
---                                      # added section: run kube-proxy in IPVS mode
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd

kubeadm init --config=kubeadm.yaml

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
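A quick sanity check once the API server answers (verification sketch; the node stays NotReady until a CNI plugin such as Calico is installed):

kubectl get nodes
kubectl get pods -n kube-system -o wide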

Cleanup (reset)

kubeadm reset -f
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/cni/
rm -rf /opt/cni/
rm -rf /var/lib/etcd
rm -rf /var/etcd
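kubeadm reset does not flush iptables/IPVS state; if the node will be re-initialized, an optional extra step (sketch):

# Clear packet-filter and IPVS rules left behind by kube-proxy
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear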

After init: recorded join commands

master01 :
kubeadm join 10.10.10.250:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:078dccd1ab07c68177e8dda8287d223796a3c1352f5b9bd54129190f8c5a7587

kubeadm join 10.10.10.250:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:078dccd1ab07c68177e8dda8287d223796a3c1352f5b9bd54129190f8c5a7587 \
--control-plane --certificate-key b256f520b2bfe2a24f03dffda3ada25536c44732a2bf0cbdf14505131290b2ae


master02:
kubeadm join 10.10.20.2:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:5417b07a197ff4e89af2f16470a01f57402b7d5ab25c244196d472e7f58aea67

kubeadm join 10.10.20.2:6443 --token fp7rmy.wqig1l2qdhfx062o --discovery-token-ca-cert-hash sha256:5417b07a197ff4e89af2f16470a01f57402b7d5ab25c244196d472e7f58aea67 --control-plane --certificate-key 7e411ac4a548b16130fcf708c8ceaa54bcb0923a38cd1e34618efbefb0565799



karmada:
kubeadm join 10.10.10.2:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:8bcde111b2c22f7b417ec769f493e948be94743c1879437374231238202ed391


game.happlelaoganma.cn:
kubeadm join 10.6.6.1:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:894f2fa9212e51d8fe72d62b9906ca3fe647e26584462f31e0a79212bbe9079d
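If the token has expired, fresh join commands can be regenerated on an existing control-plane node (standard kubeadm subcommands):

# Worker join command with a new token
kubeadm token create --print-join-command

# New certificate key for joining additional control-plane nodes
kubeadm init phase upload-certs --upload-certs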

Calico deployment

Quickstart for Calico on Kubernetes | Calico Documentation

ctr -n=k8s.io images import calico-apiserver-3.25.1.tar.gz
ctr -n=k8s.io images import calico-cni-3.25.1.tar.gz
ctr -n=k8s.io images import calico-csi-3.25.1.tar.gz
ctr -n=k8s.io images import calico-kube-controllers-v3.25.1.tar.gz
ctr -n=k8s.io images import calico-node-3.25.1.tar.gz
ctr -n=k8s.io images import calico-pod2daemon-flexvol-3.25.1.tar.gz
ctr -n=k8s.io images import calico-typha-v3.25.1.tar.gz
ctr -n=k8s.io images import calico-node-driver-registrar-v3.25.1.tar.gz
ctr -n=k8s.io images import quay.io-tigera-operator-1.29.3.tar.gz

kubectl create -f tigera-operator.yaml

kubectl create -f custom-resources.yaml
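To watch Calico come up (verification sketch; the tigerastatus resource is created by the Tigera operator installed above):

watch kubectl get pods -n calico-system
kubectl get tigerastatus
kubectl get nodes   # nodes should flip to Ready once calico-node is running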

kubectl karmada register 10.10.10.6:32443 --token xx3gla.a3oc06sjmwbzpyl1 --discovery-token-ca-cert-hash sha256:5175f04dee1d3c13e9cd5610d19cf0b729d229fef2402f12096e9e0f3d518cce --cluster-name='k8s-01'