# Installation and Deployment

# I. Manual Deployment

# 1. Environment Preparation

Disable SELinux

setenforce 0
vim /etc/selinux/config

SELINUX=disabled

Disable firewalld

systemctl disable firewalld
systemctl stop firewalld

Disable swap

swapoff -a && sysctl -w vm.swappiness=0
vim /etc/fstab

# Comment out this line
#/dev/mapper/centos-swap  swap                       swap    defaults       0  0

Set the kernel network parameters required by Docker (Node nodes only)

vim /etc/sysctl.d/k8s.conf

net.ipv4.ip_forward = 1

sysctl -p /etc/sysctl.d/k8s.conf
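To confirm the preparation took effect, each node can be checked with a few commands (a verification sketch; expected results are shown in the comments):

getenforce                      # Permissive or Disabled
systemctl is-enabled firewalld  # disabled
swapon --show                   # no output once swap is off
sysctl net.ipv4.ip_forward      # net.ipv4.ip_forward = 1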

# 2. Download the Installation Packages

kubernetes:

https://github.com/kubernetes/kubernetes/releases/tag/v1.19.5

etcd:

flannel:

https://github.com/flannel-io/flannel/releases/download/v0.13.0/flannel-v0.13.0-linux-amd64.tar.gz

# 3. Install Docker (Node nodes only)

Configure a registry mirror (image accelerator)

mkdir -p /etc/docker
vim /etc/docker/daemon.json

{
  "registry-mirrors": ["https://x5kd7bwa.mirror.aliyuncs.com"]
}

systemctl daemon-reload
systemctl restart docker
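Whether the mirror was picked up can be checked with docker info (the mirror URL above is the example used throughout this guide):

docker info | grep -A1 "Registry Mirrors"
# Registry Mirrors:
#  https://x5kd7bwa.mirror.aliyuncs.com/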

Starting with Kubernetes 1.24, Docker is no longer supported as the container runtime; containerd is used instead.

In that case, the accelerator can be set via the kubelet's startup parameters:

/usr/bin/kubelet --image-service-endpoint=https://x5kd7bwa.mirror.aliyuncs.com
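With containerd as the runtime, registry mirrors are more commonly configured in containerd itself (CRI plugin) rather than on the kubelet; a minimal sketch for containerd 1.x, reusing the same example mirror URL, might be:

vim /etc/containerd/config.toml

[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
  endpoint = ["https://x5kd7bwa.mirror.aliyuncs.com"]

systemctl restart containerd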

# 4. Install etcd (all nodes)

Add the configuration file on each node (the systemd unit below reads it from /k8s/etcd/cfg/etcd):

vim /k8s/etcd/cfg/etcd

#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.2.132:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.2.132:2379"
ETCD_ENABLE_V2="true"

# [Cluster]
ETCD_INITIAL_CLUSTER="etcd01=http://192.168.2.132:2380,etcd02=http://192.168.2.133:2380,etcd03=http://192.168.2.134:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.2.133:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.2.133:2379"
ETCD_ENABLE_V2="true"

# [Cluster]
ETCD_INITIAL_CLUSTER="etcd01=http://192.168.2.132:2380,etcd02=http://192.168.2.133:2380,etcd03=http://192.168.2.134:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.2.134:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.2.134:2379"
ETCD_ENABLE_V2="true"

# [Cluster]
ETCD_INITIAL_CLUSTER="etcd01=http://192.168.2.132:2380,etcd02=http://192.168.2.133:2380,etcd03=http://192.168.2.134:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

Set up the systemd service

vi /lib/systemd/system/etcd.service


[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/k8s/etcd/cfg/etcd
ExecStart=/k8s/etcd/bin/etcd
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

systemctl enable etcd
systemctl start etcd

/k8s/etcd/bin/etcdctl member list
/k8s/etcd/bin/etcdctl endpoint health --cluster
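As an extra sanity check, a key can be written through one member and read back through another (v3 API, using the example node addresses from this guide):

ETCDCTL_API=3 /k8s/etcd/bin/etcdctl --endpoints=http://192.168.2.132:2379 put /test/key "hello"
ETCDCTL_API=3 /k8s/etcd/bin/etcdctl --endpoints=http://192.168.2.133:2379 get /test/key
ETCDCTL_API=3 /k8s/etcd/bin/etcdctl --endpoints=http://192.168.2.134:2379 del /test/key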

# 5. Install flannel (Node nodes only)

Add the configuration file

vim /k8s/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=http://192.168.2.132:2379,http://192.168.2.133:2379,http://192.168.2.134:2379"

Write the network configuration into etcd

ETCDCTL_API=2 /k8s/etcd/bin/etcdctl set /coreos.com/network/config '{ "Network": "172.18.0.0/16", "Backend": {"Type": "vxlan"}}'
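The value can be read back to confirm it was stored (same v2 keyspace that flanneld reads):

ETCDCTL_API=2 /k8s/etcd/bin/etcdctl get /coreos.com/network/config
# { "Network": "172.18.0.0/16", "Backend": {"Type": "vxlan"}}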

Set up the systemd service

vim /lib/systemd/system/flanneld.service


[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/k8s/kubernetes/cfg/flanneld
ExecStart=/k8s/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/k8s/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target


systemctl enable flanneld
systemctl start flanneld
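For Docker containers to actually use the flannel subnet, the Docker daemon also needs to read the file written by mk-docker-opts.sh. A minimal systemd drop-in sketch (assuming dockerd lives at /usr/bin/dockerd; adjust to the local unit):

mkdir -p /etc/systemd/system/docker.service.d
vim /etc/systemd/system/docker.service.d/flannel.conf

[Service]
EnvironmentFile=-/run/flannel/subnet.env
ExecStart=
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS

systemctl daemon-reload
systemctl restart docker

cat /run/flannel/subnet.env can be used to confirm flanneld generated the subnet file.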

# 6. Install kube-apiserver (Master node only)

Add the configuration file

vim /k8s/kubernetes/cfg/kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=http://192.168.2.132:2379,http://192.168.2.133:2379,http://192.168.2.134:2379 \
--address=0.0.0.0 \
--port=8080 \
--advertise-address=192.168.2.132 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"

Set up the systemd service

vim /lib/systemd/system/kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/k8s/kubernetes/cfg/kube-apiserver
ExecStart=/k8s/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

systemctl enable kube-apiserver
systemctl start kube-apiserver
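A quick health check against the insecure port configured above (--port=8080):

curl http://127.0.0.1:8080/healthz
# ok
curl http://127.0.0.1:8080/version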

# 7. Install kube-scheduler (Master node only)

Add the configuration file

vim /k8s/kubernetes/cfg/kube-scheduler

KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect"

Set up the systemd service

vim /lib/systemd/system/kube-scheduler.service


[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/k8s/kubernetes/cfg/kube-scheduler
ExecStart=/k8s/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


systemctl enable kube-scheduler
systemctl start kube-scheduler

# 8. Install kube-controller-manager (Master node only)

Add the configuration file

vim /k8s/kubernetes/cfg/kube-controller-manager

KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-name=kubernetes"

Set up the systemd service

vim /lib/systemd/system/kube-controller-manager.service

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/k8s/kubernetes/cfg/kube-controller-manager
ExecStart=/k8s/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


systemctl enable kube-controller-manager
systemctl start kube-controller-manager
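Once all three master components are running, their status can be checked with kubectl (installed in the next step) pointed at the local insecure port; the output below is illustrative:

kubectl -s http://127.0.0.1:8080 get componentstatuses
# NAME                 STATUS    MESSAGE             ERROR
# scheduler            Healthy   ok
# controller-manager   Healthy   ok
# etcd-0               Healthy   {"health":"true"}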

# 9. Install kubectl

Just use the kubectl command-line binary directly.
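A minimal sketch, assuming kubectl was extracted to the same /k8s/kubernetes/bin directory as the other binaries:

cp /k8s/kubernetes/bin/kubectl /usr/local/bin/
kubectl -s http://127.0.0.1:8080 version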

# 10. Install kubelet (Node nodes only)

Add the configuration files

vim /k8s/kubernetes/cfg/kubelet.config

apiVersion: v1
kind: Config
clusters:
- cluster:
    server: http://192.168.2.132:8080
  name: kubernetes

Note: clusterDNS (10.254.0.2) and clusterDomain (cluster.local) are kubelet settings, not kubeconfig fields; they can be passed to the kubelet as --cluster-dns and --cluster-domain in KUBELET_OPTS below.

vim /k8s/kubernetes/cfg/kubelet

KUBELET_OPTS="--register-node=true \
--hostname-override=192.168.2.133 \
--kubeconfig=/k8s/kubernetes/cfg/kubelet.config \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5 \
--logtostderr=true"

Set up the systemd service

vim /lib/systemd/system/kubelet.service


[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/k8s/kubernetes/cfg/kubelet
ExecStart=/k8s/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target


systemctl enable kubelet
systemctl start kubelet
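Once the kubelet has registered, the node should appear on the master (IPs and version are the examples used in this guide):

kubectl -s http://192.168.2.132:8080 get nodes
# NAME            STATUS   ROLES    AGE   VERSION
# 192.168.2.133   Ready    <none>   1m    v1.19.5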

# 11. Install kube-proxy (Node nodes only)

Add the configuration file

vim /k8s/kubernetes/cfg/kube-proxy

KUBE_PROXY_OPTS="--logtostderr=true \
--hostname-override=192.168.2.133 \
--master=http://192.168.2.132:8080"

Set up the systemd service

vim /lib/systemd/system/kube-proxy.service


[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/k8s/kubernetes/cfg/kube-proxy
ExecStart=/k8s/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target


systemctl enable kube-proxy
systemctl start kube-proxy
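kube-proxy can be verified through its service state and the KUBE-* iptables chains it programs (exact rules depend on the Services that exist):

systemctl status kube-proxy
iptables-save | grep -c KUBE-    # non-zero once rules have been synced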

# II. Deploy with sealos

# 1. Download and Install sealos

sealos is a single Go binary; just download it and copy it into a bin directory. It can also be downloaded from the releases page.

wget https://github.com/labring/sealos/releases/download/v4.1.6/sealos_4.1.6_linux_amd64.tar.gz && \
tar -zxvf sealos_4.1.6_linux_amd64.tar.gz sealos && chmod +x sealos && mv sealos /usr/bin
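A quick check that the binary is on PATH:

sealos version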

# 2. Install the K8s Cluster

IP_ADDR="192.168.2.132" # FIXME: Your IP address
PASSWORD='password' # FIXME: Your SSH root password
sealos run labring/kubernetes:v1.24.0 labring/calico:v3.22.1 labring/helm:v3.8.2 --masters $IP_ADDR -p $PASSWORD
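When the run completes, the cluster can be verified from the master node:

kubectl get nodes
kubectl get pods -A    # calico and the core components should all reach Running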

(Optional) If the network is slow, you can import pre-downloaded images first and then run the command above:

sealos load -i kubernetes.tar

sealos load -i calico.tar

sealos load -i helm.tar

To delete the cluster: sealos reset

# 3. Remove the Kubernetes Node Taints

kubectl taint node node-role.kubernetes.io/master- node-role.kubernetes.io/control-plane- --all
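Whether the taints were removed can be confirmed with:

kubectl describe nodes | grep -i taints
# Taints:             <none>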

sealos ships with a built-in private registry on port 5000; the default credentials are admin / passw0rd.
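As a usage sketch, an image could be pushed into that registry from a machine with Docker; the registry address below (master IP plus port 5000) is an assumption, and since the registry speaks plain HTTP the client may need it listed under insecure-registries in its daemon.json:

docker login 192.168.2.132:5000 -u admin -p passw0rd
docker tag nginx:latest 192.168.2.132:5000/library/nginx:latest
docker push 192.168.2.132:5000/library/nginx:latest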
