您当前的位置: 首页 > 慢生活 > 程序人生 网站首页程序人生
68、Kubernetes - 高可用的 K8S 构建(3)
发布时间:2023-02-07 22:51:07编辑:雪饮阅读()
master01,master02,master03都执行
Step1
mkdir -p /usr/local/kubernetes/install
master01节点单独上传
拷贝E:\12、Kubernetes - 高可用的 K8S 集群构建\鸿鹄论坛_12、Kubernetes - 高可用的 K8S 集群构建\2、资料\高可用构建所需镜像.zip
中的文件haproxy.tar、keepalived.tar、kubeadm-basic.images.tar.gz、load-images.sh、start.keep.tar.gz
拷贝到这个新建的目录中
然后修改
load-images.sh
给各节点复制过去
scp /usr/local/kubernetes/install/* root@k8s-master02:/usr/local/kubernetes/install/
scp /usr/local/kubernetes/install/* root@k8s-master03:/usr/local/kubernetes/install/
Step2
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts":["native.cgroupdriver=systemd"],
"log-driver":"json-file",
"log-opts":{
"max-size":"100m"
}
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
docker load -i /usr/local/kubernetes/install/haproxy.tar
docker load -i /usr/local/kubernetes/install/keepalived.tar
cd /usr/local/kubernetes/install/
tar -zxvf kubeadm-basic.images.tar.gz
Step3
然后master修改load-images.sh
vi /usr/local/kubernetes/install/load-images.sh
#!/bin/bash
# Load every Docker image tarball extracted from kubeadm-basic.images.tar.gz.
# Run identically on master01/02/03 after the tarball has been unpacked.
set -euo pipefail

cd /usr/local/kubernetes/install/kubeadm-basic.images

# Iterate with a glob instead of parsing `ls` output: safe with unusual
# filenames and needs no temp file in /tmp.
for image in *; do
  # Skip the loader script itself in case it was copied into this directory.
  [ "$image" = "load-images.sh" ] && continue
  [ -f "$image" ] || continue
  docker load -i "$image"
done
然后也同步到另外两个节点
scp /usr/local/kubernetes/install/load-images.sh root@k8s-master02:/usr/local/kubernetes/install/
scp /usr/local/kubernetes/install/load-images.sh root@k8s-master03:/usr/local/kubernetes/install/
Step4
然后各节点都执行
/usr/local/kubernetes/install/load-images.sh
Step5
接下来是master01单独的
cd /usr/local/kubernetes/install/
tar -zxvf start.keep.tar.gz
mv data/ /
cd /data/lb/
vi etc/haproxy.cfg
配置如
[root@k8s-master01 lb]# cat etc/haproxy.cfg
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
#chroot /usr/share/haproxy
#user haproxy
#group haproxy
daemon
defaults
log global
mode http
option httplog
option dontlognull
retries 3
option redispatch
timeout connect 5000
timeout client 50000
timeout server 50000
frontend stats-front
bind *:8081
mode http
default_backend stats-back
frontend fe_k8s_6444
bind *:6444
mode tcp
timeout client 1h
log global
option tcplog
default_backend be_k8s_6443
acl is_websocket hdr(Upgrade) -i WebSocket
acl is_websocket hdr_beg(Host) -i ws
backend stats-back
mode http
balance roundrobin
stats uri /haproxy/stats
stats auth pxcstats:secret
backend be_k8s_6443
mode tcp
timeout queue 1h
timeout server 1h
timeout connect 1h
log global
balance roundrobin
server rancher01 192.168.66.10:6443
server rancher02 192.168.66.21:6443
server rancher03 192.168.66.22:6443
然后备份下
cp etc/haproxy.cfg /root/
然后再就是只留一个rancher01的这个配置
[root@k8s-master01 lb]# cat etc/haproxy.cfg
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
#chroot /usr/share/haproxy
#user haproxy
#group haproxy
daemon
defaults
log global
mode http
option httplog
option dontlognull
retries 3
option redispatch
timeout connect 5000
timeout client 50000
timeout server 50000
frontend stats-front
bind *:8081
mode http
default_backend stats-back
frontend fe_k8s_6444
bind *:6444
mode tcp
timeout client 1h
log global
option tcplog
default_backend be_k8s_6443
acl is_websocket hdr(Upgrade) -i WebSocket
acl is_websocket hdr_beg(Host) -i ws
backend stats-back
mode http
balance roundrobin
stats uri /haproxy/stats
stats auth pxcstats:secret
backend be_k8s_6443
mode tcp
timeout queue 1h
timeout server 1h
timeout connect 1h
log global
balance roundrobin
server rancher01 192.168.66.10:6443
然后再就是修改这个脚本如
[root@k8s-master01 lb]# cat start-haproxy.sh
#!/bin/bash
# Start the wise2c/haproxy-k8s load-balancer container on this master.
# The container listens on host port 6444; the bind-mounted haproxy.cfg
# replaces the config the image would otherwise template from these env vars.
set -euo pipefail

# Control-plane apiserver endpoints for the HAProxy backend.
# NOTE(review): these IPs (.10/.20/.21) differ from the servers listed in
# /data/lb/etc/haproxy.cfg (.10/.21/.22) — confirm which set is correct.
readonly MasterIP1=192.168.66.10
readonly MasterIP2=192.168.66.20
readonly MasterIP3=192.168.66.21
readonly MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
  -e MasterIP1="$MasterIP1" \
  -e MasterIP2="$MasterIP2" \
  -e MasterIP3="$MasterIP3" \
  -e MasterPort="$MasterPort" \
  -v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
  wise2c/haproxy-k8s
然后再就是执行呗
[root@k8s-master01 lb]# ./start-haproxy.sh
f1581f3b7e70715d49dd09a609e7497557db7caf81bb8608de37939f0b5f4f96
然后再就是配置这个脚本如
[root@k8s-master01 lb]# cat start-keepalived.sh
#!/bin/bash
# Start the wise2c/keepalived-k8s container. It manages the virtual IP
# (VIP) on the given interface and fails the VIP over to another master
# when the health-checked port (the HAProxy frontend) stops answering.
set -euo pipefail

readonly VIRTUAL_IP=192.168.66.100   # cluster VIP, matches controlPlaneEndpoint
readonly INTERFACE=ens33             # host NIC that carries the VIP
readonly NETMASK_BIT=24
readonly CHECK_PORT=6444             # HAProxy frontend port, health-checked
readonly RID=10
readonly VRID=160                    # VRRP virtual router id — must match on all masters
readonly MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
  --net=host --cap-add=NET_ADMIN \
  -e VIRTUAL_IP="$VIRTUAL_IP" \
  -e INTERFACE="$INTERFACE" \
  -e CHECK_PORT="$CHECK_PORT" \
  -e RID="$RID" \
  -e VRID="$VRID" \
  -e NETMASK_BIT="$NETMASK_BIT" \
  -e MCAST_GROUP="$MCAST_GROUP" \
  wise2c/keepalived-k8s
然后这个也执行呗
[root@k8s-master01 lb]# ./start-keepalived.sh
d79e212f7120c06895dcb5c516b283442adf3b1f68eada47d2398135ab6e1ffd
然后好像就是ens33这里多出现一个192.168.66.100/24就算ok了
[root@k8s-master01 lb]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:cc:99:77 brd ff:ff:ff:ff:ff:ff
inet 192.168.66.10/24 brd 192.168.66.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.66.100/24 scope global secondary ens33
valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:fc:a9:51:94 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
5: vethd2a4541@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether 5e:07:80:a1:2c:ee brd ff:ff:ff:ff:ff:ff link-netnsid 0
Step6
也是master01单独执行
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service
cd /usr/local/kubernetes/install
mkdir images
mv * images/
cp /root/kubeadm-config.yaml ./
基于之前普通集群时候的这个配置的修改
[root@k8s-master01 install]# cat kubeadm-config.yaml
# kubeadm InitConfiguration: per-node settings used by `kubeadm init` on master01.
# (Indentation restored — the original listing had lost all YAML nesting.)
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef   # well-known example token — replace for real clusters
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.66.10  # master01's own address
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
# ClusterConfiguration: cluster-wide settings shared by all control-plane nodes.
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# The HA entry point: keepalived VIP + HAProxy port, not any single apiserver.
controlPlaneEndpoint: "192.168.66.100:6444"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  # NOTE(review): flannel's canonical default pod CIDR is 10.244.0.0/16;
  # 10.224.0.0/16 may be a transposition typo — confirm against the CNI manifest.
  podSubnet: 10.224.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# KubeProxyConfiguration: switch kube-proxy from iptables to IPVS mode.
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
多了一个controlPlaneEndpoint: "192.168.66.100:6444"而已
然后初始化遭遇新的错误
[root@k8s-master01 install]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
unknown flag: --experimental-upload-certs
To see the stack trace of this error execute with --v=5 or higher
这边建议是卸载
yum remove -y kubelet kubeadm kubectl
然后安装我们之前初始化普通集群的时候的这个版本
yum install -y kubelet-1.15.1 kubeadm-1.15.1 kubectl-1.15.1
再次初始化就成功
[root@k8s-master01 install]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
Flag --experimental-upload-certs has been deprecated, use --upload-certs instead
[init] Using Kubernetes version: v1.15.1
[preflight] Running pre-flight checks
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 23.0.0. Latest validated version: 18.09
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.66.10 192.168.66.100]
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.66.10 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.66.10 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 25.010965 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
c03402f8c9dfc85aa3d7fd7087f3f80da984fc61f7e97c65a4dfe99bd5224c0f
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.66.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:43ccc6fbddb8f78383fb6985f54f1d455e0204f0ad6da6c01b3180960a2f42c5 \
--control-plane --certificate-key c03402f8c9dfc85aa3d7fd7087f3f80da984fc61f7e97c65a4dfe99bd5224c0f
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.66.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:43ccc6fbddb8f78383fb6985f54f1d455e0204f0ad6da6c01b3180960a2f42c5
这次可以发现结束日志里面有两个加入节点的示例命令
我认为这一个是高可用节点的添加,一个是普通node节点的添加
然后按照提示
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
关键字词:Kubernetes,高可用,K8S,构建
相关文章
- 67、Kubernetes - 高可用的 K8S 构建(2)_]
- 64、Kubernetes - Helm 及其它功能性组件 - EFK 日志_
- 66、Kubernetes - 高可用的 K8S 构建(1)_]
- 65、Kubernetes - 证书可用年限修改_
- 64、Kubernetes - Helm 及其它功能性组件 - EFK 日志_
- no available release name found问题排查
- 56、Kubernetes - 安全 鉴权(3)
- 53、Kubernetes - 安全 认证
- 23、Kubernetes - 资源清单 - start、stop、相位
- 22、Kubernetes - 资源清单 - 探针