Deploying a k8s Cluster Across Cloud Vendors over Public IPs (Tencent Cloud + Alibaba Cloud)

Preface

I've been learning Kubernetes recently. I already had two Tencent Cloud lightweight servers and had successfully built a k8s cluster over their private IPs. As my learning went deeper, two servers no longer covered my needs, so I went bargain-hunting and signed up for two Alibaba Cloud servers on a free three-month trial. Servers from different vendors sit on different private networks, so their private IPs cannot reach each other; I therefore tried building the cluster over public IPs instead, and this post records the process.

I. Installing kubeadm

1. Basic environment setup

Run the following on every server, using master as the example:

	# Set each machine's own hostname (takes effect after a reboot)
	hostnamectl set-hostname master
	
	# Reboot the server
	reboot
	
	# Set up /etc/hosts
	# I have four servers; map each public IP to its hostname
	cat >> /etc/hosts <<EOF
	101.42.xxx.xxx master
	62.234.xxx.xxx node01
	123.57.xxx.xxx node03
	39.105.xxx.xxx node04
	EOF
	
	systemctl restart NetworkManager.service
	
	# Enable IP forwarding
	echo 1 > /proc/sys/net/ipv4/ip_forward
	# Set SELinux to permissive mode (effectively disabling it)
	sudo setenforce 0
	sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
	
	# Disable swap
	swapoff -a  
	sed -ri 's/.*swap.*/#&/' /etc/fstab
	
	# Allow iptables to see bridged traffic
	cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
	br_netfilter
	EOF
	
	cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
	net.bridge.bridge-nf-call-ip6tables = 1
	net.bridge.bridge-nf-call-iptables = 1
	EOF
	sudo sysctl --system
	
	# Enable kernel support for forwarding and bridge filtering (persistent)
	cat >> /etc/sysctl.conf << EOF
	net.ipv4.ip_forward = 1
	net.bridge.bridge-nf-call-iptables = 1
	net.bridge.bridge-nf-call-ip6tables = 1
	EOF
	
	sysctl -p
	
	# Enable IPVS support
	yum -y install ipvsadm ipset
	
	# Load the IPVS kernel modules persistently
	cat > /etc/sysconfig/modules/ipvs.modules <<EOF
	modprobe -- ip_vs
	modprobe -- ip_vs_rr
	modprobe -- ip_vs_wrr
	modprobe -- ip_vs_sh
	modprobe -- nf_conntrack_ipv4
	EOF
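
The heredoc above only writes the module list; nothing in the original actually runs it. A minimal sketch to load the modules right away and confirm they are present (module names as listed above; nf_conntrack_ipv4 applies to the 3.10 kernel these CentOS 7 nodes run):

	# Run the module script once now instead of waiting for a reboot
	chmod 755 /etc/sysconfig/modules/ipvs.modules
	bash /etc/sysconfig/modules/ipvs.modules
	# Each module should show up in the loaded-module list
	lsmod | grep -e ip_vs -e nf_conntrack_ipv4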

2. The key step for a public-IP cluster

Cloud vendors NAT the public IP to the instance rather than binding it to the NIC, so it is not visible on any local interface; creating a virtual NIC that carries the public IP works around this. On each server:

	# Create a virtual NIC carrying the public IP
	cat > /etc/sysconfig/network-scripts/ifcfg-eth0:1 <<EOF
	BOOTPROTO=static
	DEVICE=eth0:1
	IPADDR=39.105.xxx.xxx  # your public IP
	PREFIX=32
	TYPE=Ethernet
	USERCTL=no
	ONBOOT=yes
	EOF

	# Restart the network
	systemctl restart network
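
To confirm the alias took effect (assuming the physical interface is named eth0 as above), the public IP should now be listed as a /32 label on eth0:

	# The eth0:1 alias carrying the public /32 should appear in the output
	ip -4 addr show eth0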

3. Install kubelet, kubeadm, and kubectl

	cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
	[kubernetes]
	name=Kubernetes
	baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
	enabled=1
	gpgcheck=0
	repo_gpgcheck=0
	gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
	   http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
	exclude=kubelet kubeadm kubectl
	EOF
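
The repo above deliberately sets exclude=kubelet kubeadm kubectl, so the actual installation has to bypass the exclude list; the original post omits this step. Presumably it looked like the following, pinning the same v1.20.9 used everywhere else in this post:

	# Install version-pinned packages, bypassing the repo's exclude list
	sudo yum install -y kubelet-1.20.9 kubeadm-1.20.9 kubectl-1.20.9 --disableexcludes=kubernetes
	# Enable kubelet now and on boot (it crash-loops until kubeadm init/join runs; that's expected)
	sudo systemctl enable --now kubelet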

II. Bootstrapping the cluster with kubeadm

1. Pull the images each node needs

Run this on the master node (note the script assumes Docker is already installed, as the node listing later shows docker://20.10.5):

	sudo tee ./images.sh <<-'EOF'
	#!/bin/bash
	images=(
	kube-apiserver:v1.20.9
	kube-proxy:v1.20.9
	kube-controller-manager:v1.20.9
	kube-scheduler:v1.20.9
	coredns:1.7.0
	etcd:3.4.13-0
	pause:3.2
	)
	for imageName in ${images[@]} ; do
	docker pull registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/$imageName
	done
	EOF
	   
	chmod +x ./images.sh && ./images.sh
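
If the script ran cleanly, all seven images should now be local:

	# All seven images pulled above should be listed
	docker images | grep lfy_k8s_images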

2. Initialize the master node

Run on master:

	# Initialize the control plane
	kubeadm init \
	--apiserver-advertise-address=101.42.xxx.xxx \
	--control-plane-endpoint=master \
	--image-repository registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images \
	--kubernetes-version v1.20.9 \
	--service-cidr=10.96.0.0/16 \
	--pod-network-cidr=192.168.0.0/16

On success you should see:

	Your Kubernetes control-plane has initialized successfully!
	
	To start using your cluster, you need to run the following as a regular user:
	
	  mkdir -p $HOME/.kube
	  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	  sudo chown $(id -u):$(id -g) $HOME/.kube/config
	
	Alternatively, if you are the root user, you can run:
	
	  export KUBECONFIG=/etc/kubernetes/admin.conf
	
	You should now deploy a pod network to the cluster.
	Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	  https://kubernetes.io/docs/concepts/cluster-administration/addons/
	
	You can now join any number of control-plane nodes by copying certificate authorities
	and service account keys on each node and then running the following as root:
	
	  kubeadm join master:6443 --token ymmfzm.q9bsuda2xh3rj72y \
	    --discovery-token-ca-cert-hash sha256:ceb13c0ab88888bf9136c98db076b49cbbb9dcc8f122b4d8284d3985733f1ebb \
	    --control-plane
	
	Then you can join any number of worker nodes by running the following on each as root:
	
	kubeadm join master:6443 --token ymmfzm.q9bsuda2xh3rj72y \
	    --discovery-token-ca-cert-hash sha256:ceb13c0ab88888bf9136c98db076b49cbbb9dcc8f122b4d8284d3985733f1ebb

Set up .kube/config:

	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config

3. Install the network add-on

	curl https://docs.projectcalico.org/v3.20/manifests/calico.yaml -O
	
	vi calico.yaml
	# Make the following changes:
	
	# Search for CALICO_IPV4POOL_CIDR, uncomment it, and set the value to
	# the --pod-network-cidr used at kubeadm init (192.168.0.0/16 above)
	# The default IPv4 pool to create on startup if none exists. Pod IPs will be
	# chosen from this range. Changing this value after installation will have
	# no effect. This should fall within `--cluster-cidr`.
	 - name: CALICO_IPV4POOL_CIDR
	   value: "192.168.0.0/16"
	
	# ....
	# Search for k8s,bgp and add the following at the same level
	 - name: CLUSTER_TYPE
	   value: "k8s,bgp"
	 - name: IP_AUTODETECTION_METHOD
	   value: "interface=eth0"    # eth0 is the name of your NIC

	# Deploy calico
	kubectl apply -f calico.yaml
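
Calico usually takes a minute or two to roll out; one way to watch it come up before joining any nodes:

	# Wait until calico-node and calico-kube-controllers reach Running
	kubectl get pods -n kube-system -w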

4. Join the worker nodes

	kubeadm join master:6443 --token ymmfzm.q9bsuda2xh3rj72y \
	    --discovery-token-ca-cert-hash sha256:ceb13c0ab88888bf9136c98db076b49cbbb9dcc8f122b4d8284d3985733f1ebb
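
Join tokens expire after 24 hours; if yours has expired by the time a node joins, a fresh, complete join command can be generated on the master:

	# Prints a ready-to-paste kubeadm join command with a new token
	kubeadm token create --print-join-command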

5. Verify the cluster state

	kubectl get nodes -o wide

[root@maste ~]# kubectl get nodes -o wide
NAME     STATUS                     ROLES                  AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
master   Ready                      control-plane,master   19h   v1.20.9   101.42.xxx.xxx   <none>        CentOS Linux 7 (Core)   3.10.0-1160.11.1.el7.x86_64   docker://20.10.5
node01   Ready                      <none>                 18h   v1.20.9   62.234.xxx.xxx   <none>        CentOS Linux 7 (Core)   3.10.0-1160.11.1.el7.x86_64   docker://20.10.5
node03   Ready,SchedulingDisabled   <none>                 19h   v1.20.9   123.57.xxx.xxx   <none>        CentOS Linux 7 (Core)   3.10.0-957.21.3.el7.x86_64    docker://20.10.5
node04   Ready,SchedulingDisabled   <none>                 18h   v1.20.9   39.105.xxx.xxx   <none>        CentOS Linux 7 (Core)   3.10.0-957.21.3.el7.x86_64    docker://20.10.5
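
Note that node03 and node04 above report SchedulingDisabled, meaning they were cordoned and will not accept new pods; if that is unintended, they can be re-enabled (hostnames as in the listing above):

	# Mark the cordoned nodes schedulable again
	kubectl uncordon node03
	kubectl uncordon node04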

6. Deploy the dashboard

1) Deploy

	kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml

2) Expose an access port

	kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard

Change the type to NodePort:

	  externalTrafficPolicy: Cluster
	  ports:
	  - nodePort: 32249
	    port: 443
	    protocol: TCP
	    targetPort: 8443
	  selector:
	    k8s-app: kubernetes-dashboard
	  sessionAffinity: None
	  type: NodePort
	status:
	  loadBalancer: {}
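
If you'd rather not edit the service interactively, the same change can be made with a one-liner (a sketch of the equivalent kubectl patch):

	# Switch the dashboard service type to NodePort in one command
	kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
	  -p '{"spec":{"type":"NodePort"}}'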

3) Look up the port (remember to open it in your cloud firewall / security group rules)

	kubectl get svc -A | grep kubernetes-dashboard

[root@maste ~]# kubectl get svc -A |grep kubernetes-dashboard
kubernetes-dashboard   dashboard-metrics-scraper            ClusterIP   10.96.122.67    <none>        8000/TCP                     18h
kubernetes-dashboard   kubernetes-dashboard                 NodePort    10.96.152.132   <none>        443:32249/TCP                18h

4) Create an access account

Create the file:

	vi dash.yaml

Paste the following into dash.yaml:

	apiVersion: v1
	kind: ServiceAccount
	metadata:
	  name: admin-user
	  namespace: kubernetes-dashboard
	---
	apiVersion: rbac.authorization.k8s.io/v1
	kind: ClusterRoleBinding
	metadata:
	  name: admin-user
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: cluster-admin
	subjects:
	- kind: ServiceAccount
	  name: admin-user
	  namespace: kubernetes-dashboard

Apply it:

	kubectl apply -f dash.yaml

Fetch a login token:

	# Fetch the access token
	kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
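
The dashboard is then reachable over HTTPS on any node's public IP at the NodePort found earlier (32249 here), and the token above goes into the login form. A quick reachability check from outside, using the master's masked public IP from this post:

	# -k because the dashboard serves a self-signed certificate
	curl -k https://101.42.xxx.xxx:32249/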

5) The UI

(Screenshot: the Kubernetes Dashboard web UI.)
