kubernetes V1.22.7 集群部署
浏览量:455
1、环境准备
1.1 主机规划

1.2 软件版本

1.3 k8s集群架构图

1.4 网络分配

2、环境准备
2.1 所有机器准备工作
2.1.1 修改主机名
sudo hostnamectl set-hostname xxx
2.1.2 配置hosts文件
sudo vi /etc/hosts 10.191.25.8 master01.px-k8s 10.191.25.9 master02.px-k8s 10.191.25.10 node.px-k8s
2.1.3 关闭防火墙和selinux
sudo systemctl stop firewalld && sudo systemctl disable firewalld sudo setenforce 0 sudo sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config #查看结果 sudo sestatus
2.1.4 关闭交换分区
sudo swapoff -a sudo sed -ri 's/.*swap.*/#&/' /etc/fstab echo "vm.swappiness = 0" | sudo tee -a /etc/sysctl.conf sudo sysctl -p
2.1.5 系统优化
#limit优化 ulimit -SHn 65535 sudo tee -a /etc/security/limits.conf <<EOF * soft nofile 819200 * hard nofile 819200 * soft nproc 819200 * hard nproc 819200 * soft memlock unlimited * hard memlock unlimited EOF
2.1.6 安装其他工具(可选)
sudo yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git lrzsz ipvsadm ipset sysstat conntrack libseccomp -y
2.2 搭建etcd集群
2.2.1 配置工作目录
# 在master01上创建工作目录 mkdir -p /data/k8s-work
2.2.2 生成cfssl证书
安装工具
cd /data/k8s-work sudo wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 sudo wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 sudo wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 sudo chmod +x cfssl* sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson sudo mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
配置ca请求文件
sudo tee ca-csr.json <<-'EOF'
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "system"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
sudo /usr/local/bin/cfssl gencert -initca ca-csr.json |sudo /usr/local/bin/cfssljson -bare ca
配置ca证书策略
sudo tee ca-config.json <<-'EOF'
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
配置etcd请求csr文件
sudo tee etcd-csr.json <<-'EOF'
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"10.191.25.8",
"10.191.25.9",
"10.191.25.10"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "system"
}]
}
EOF
sudo /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json |sudo /usr/local/bin/cfssljson -bare etcd
#生成etcd-key.pem etcd.pem
2.2.3 部署etcd
下载并分发软件包
sudo wget https://github.com/etcd-io/etcd/releases/download/v3.5.1/etcd-v3.5.1-linux-amd64.tar.gz sudo tar -xvf etcd-v3.5.1-linux-amd64.tar.gz sudo cp -p etcd-v3.5.1-linux-amd64/etcd* /usr/local/bin/ #拷贝到其他master节点 sudo scp etcd-v3.5.1-linux-amd64/etcd* masterIP:/usr/local/bin/
创建配置文件注意区分节点
#master01 sudo tee /etc/etcd/etcd.conf <<-'EOF' #[Member] ETCD_NAME="etcd1" ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_PEER_URLS="https://10.191.25.8:2380" ETCD_LISTEN_CLIENT_URLS="https://10.191.25.8:2379,http://127.0.0.1:2379" #[Clustering] ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.191.25.8:2380" ETCD_ADVERTISE_CLIENT_URLS="https://10.191.25.8:2379" ETCD_INITIAL_CLUSTER="etcd1=https://10.191.25.8:2380,etcd2=https://10.191.25.9:2380,etcd3=https://10.191.25.10:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ETCD_INITIAL_CLUSTER_STATE="new" EOF #master02 sudo tee /etc/etcd/etcd.conf <<-'EOF' #[Member] ETCD_NAME="etcd2" ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_PEER_URLS="https://10.191.25.9:2380" ETCD_LISTEN_CLIENT_URLS="https://10.191.25.9:2379,http://127.0.0.1:2379" #[Clustering] ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.191.25.9:2380" ETCD_ADVERTISE_CLIENT_URLS="https://10.191.25.9:2379" ETCD_INITIAL_CLUSTER="etcd1=https://10.191.25.8:2380,etcd2=https://10.191.25.9:2380,etcd3=https://10.191.25.10:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ETCD_INITIAL_CLUSTER_STATE="new" EOF #master03 sudo tee /etc/etcd/etcd.conf <<-'EOF' #[Member] ETCD_NAME="etcd3" ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_PEER_URLS="https://10.191.25.10:2380" ETCD_LISTEN_CLIENT_URLS="https://10.191.25.10:2379,http://127.0.0.1:2379" #[Clustering] ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.191.25.10:2380" ETCD_ADVERTISE_CLIENT_URLS="https://10.191.25.10:2379" ETCD_INITIAL_CLUSTER="etcd1=https://10.191.25.8:2380,etcd2=https://10.191.25.9:2380,etcd3=https://10.191.25.10:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ETCD_INITIAL_CLUSTER_STATE="new" EOF
创建启动service
sudo tee /usr/lib/systemd/system/etcd.service <<-'EOF' [Unit] Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target [Service] Type=notify EnvironmentFile=-/etc/etcd/etcd.conf WorkingDirectory=/var/lib/etcd/ ExecStart=/usr/local/bin/etcd \ --cert-file=/etc/etcd/ssl/etcd.pem \ --key-file=/etc/etcd/ssl/etcd-key.pem \ --trusted-ca-file=/etc/etcd/ssl/ca.pem \ --peer-cert-file=/etc/etcd/ssl/etcd.pem \ --peer-key-file=/etc/etcd/ssl/etcd-key.pem \ --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \ --peer-client-cert-auth \ --client-cert-auth Restart=on-failure RestartSec=5 LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
各节点创建etcd目录
sudo mkdir -p /etc/etcd sudo mkdir -p /etc/etcd/ssl sudo mkdir -p /var/lib/etcd/default.etcd
拷贝ca证书
sudo \cp -a ca*.pem etcd*.pem /etc/etcd/ssl/ #拷贝到其他节点 scp -r ca*.pem etcd*.pem masterIP:/etc/etcd/ssl/
启动并查看etcd状态
sudo systemctl daemon-reload sudo systemctl enable --now etcd.service sudo systemctl status etcd sudo ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://10.191.25.8:2379,https://10.191.25.9:2379,https://10.191.25.10:2379 endpoint health +---------------------------+--------+-------------+-------+ | ENDPOINT | HEALTH | TOOK | ERROR | +---------------------------+--------+-------------+-------+ | https://10.191.25.8:2379 | true | 19.338923ms | | | https://10.191.25.10:2379 | true | 20.983839ms | | | https://10.191.25.9:2379 | true | 20.962504ms | | +---------------------------+--------+-------------+-------+
2.3 kubernetes部署
2.3.1 下载并分发安装包
wget https://dl.k8s.io/v1.22.7/kubernetes-server-linux-amd64.tar.gz sudo tar -xvf kubernetes-server-linux-amd64.tar.gz cd kubernetes/server/bin/ sudo \cp -a kube-apiserver kube-controller-manager kube-scheduler kubectl kube-proxy /usr/local/bin/ #拷贝到其他节点 scp -r kube-apiserver kube-controller-manager kube-scheduler kubectl kube-proxy masterIP:/usr/local/bin/
2.3.2 所有节点创建工作目录
sudo mkdir -p /etc/kubernetes/ sudo mkdir -p /etc/kubernetes/ssl sudo mkdir -p /var/log/kubernetes
2.3.2 部署api-server
创建apiserver-csr
sudo tee kube-apiserver-csr.json <<-'EOF'
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"10.191.25.8",
"10.191.25.9",
"10.191.25.10",
"10.96.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "system"
}
]
}
EOF
生成证书和token文件
sudo /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json |sudo /usr/local/bin/cfssljson -bare kube-apiserver cat > token.csv << EOF $(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap" EOF
创建配置文件
#master01 sudo tee /etc/kubernetes/kube-apiserver.conf <<-'EOF' KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \ --anonymous-auth=false \ --bind-address=10.191.25.8 \ --secure-port=6443 \ --advertise-address=10.191.25.8 \ --insecure-port=0 \ --authorization-mode=Node,RBAC \ --runtime-config=api/all=true \ --enable-bootstrap-token-auth \ --service-cluster-ip-range=10.96.0.0/16 \ --token-auth-file=/etc/kubernetes/token.csv \ --service-node-port-range=30000-50000 \ --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \ --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \ --client-ca-file=/etc/kubernetes/ssl/ca.pem \ --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \ --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \ --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \ --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \ --service-account-issuer=api \ --etcd-cafile=/etc/etcd/ssl/ca.pem \ --etcd-certfile=/etc/etcd/ssl/etcd.pem \ --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \ --etcd-servers=https://10.191.25.8:2379,https://10.191.25.9:2379,https://10.191.25.10:2379 \ --enable-swagger-ui=true \ --allow-privileged=true \ --apiserver-count=3 \ --audit-log-maxage=30 \ --audit-log-maxbackup=3 \ --audit-log-maxsize=100 \ --audit-log-path=/var/log/kube-apiserver-audit.log \ --event-ttl=1h \ --alsologtostderr=true \ --logtostderr=false \ --log-dir=/var/log/kubernetes \ --v=4" EOF #master02 sudo tee /etc/kubernetes/kube-apiserver.conf <<-'EOF' KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \ --anonymous-auth=false \ --bind-address=10.191.25.9 \ --secure-port=6443 \ --advertise-address=10.191.25.9 \ --insecure-port=0 \ --authorization-mode=Node,RBAC \ --runtime-config=api/all=true \ --enable-bootstrap-token-auth \ 
--service-cluster-ip-range=10.96.0.0/16 \ --token-auth-file=/etc/kubernetes/token.csv \ --service-node-port-range=30000-50000 \ --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \ --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \ --client-ca-file=/etc/kubernetes/ssl/ca.pem \ --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \ --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \ --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \ --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \ --service-account-issuer=api \ --etcd-cafile=/etc/etcd/ssl/ca.pem \ --etcd-certfile=/etc/etcd/ssl/etcd.pem \ --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \ --etcd-servers=https://10.191.25.8:2379,https://10.191.25.9:2379,https://10.191.25.10:2379 \ --enable-swagger-ui=true \ --allow-privileged=true \ --apiserver-count=3 \ --audit-log-maxage=30 \ --audit-log-maxbackup=3 \ --audit-log-maxsize=100 \ --audit-log-path=/var/log/kube-apiserver-audit.log \ --event-ttl=1h \ --alsologtostderr=true \ --logtostderr=false \ --log-dir=/var/log/kubernetes \ --v=4" EOF #master03 sudo tee /etc/kubernetes/kube-apiserver.conf <<-'EOF' KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \ --anonymous-auth=false \ --bind-address=10.191.25.10 \ --secure-port=6443 \ --advertise-address=10.191.25.10 \ --insecure-port=0 \ --authorization-mode=Node,RBAC \ --runtime-config=api/all=true \ --enable-bootstrap-token-auth \ --service-cluster-ip-range=10.96.0.0/16 \ --token-auth-file=/etc/kubernetes/token.csv \ --service-node-port-range=30000-50000 \ --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \ --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \ --client-ca-file=/etc/kubernetes/ssl/ca.pem \ --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \ --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \ 
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \ --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \ --service-account-issuer=api \ --etcd-cafile=/etc/etcd/ssl/ca.pem \ --etcd-certfile=/etc/etcd/ssl/etcd.pem \ --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \ --etcd-servers=https://10.191.25.8:2379,https://10.191.25.9:2379,https://10.191.25.10:2379 \ --enable-swagger-ui=true \ --allow-privileged=true \ --apiserver-count=3 \ --audit-log-maxage=30 \ --audit-log-maxbackup=3 \ --audit-log-maxsize=100 \ --audit-log-path=/var/log/kube-apiserver-audit.log \ --event-ttl=1h \ --alsologtostderr=true \ --logtostderr=false \ --log-dir=/var/log/kubernetes \ --v=4" EOF
创建启动文件
sudo tee /usr/lib/systemd/system/kube-apiserver.service <<-'EOF' [Unit] Description=Kubernetes API Server Documentation=https://github.com/kubernetes/kubernetes After=etcd.service Wants=etcd.service [Service] EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS Restart=on-failure RestartSec=5 Type=notify LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
拷贝证书和token
sudo \cp -a ca*.pem kube-apiserver*.pem /etc/kubernetes/ssl sudo \cp -a token.csv /etc/kubernetes/ #同步到其他节点 scp -r ca*.pem kube-apiserver*.pem masterIP:/etc/kubernetes/ssl scp -r token.csv masterIP:/etc/kubernetes/
启动apiserver
sudo systemctl daemon-reload sudo systemctl enable --now kube-apiserver sudo systemctl status kube-apiserver # 测试 curl --insecure https://10.191.25.8:6443/ curl --insecure https://10.191.25.9:6443/ curl --insecure https://10.191.25.10:6443/
2.3.3 部署kubelet
创建csr请求文件
sudo tee admin-csr.json <<-'EOF'
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:masters",
"OU": "system"
}
]
}
EOF
sudo /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json |sudo /usr/local/bin/cfssljson -bare admin
sudo \cp -a admin*.pem /etc/kubernetes/ssl/
kubeconfig配置
kube.config 为 kubectl 的配置文件,包含访问 apiserver 的所有信息,如 apiserver 地址、CA 证书和自身使用的证书
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.191.25.8:6443 --kubeconfig=kube.config kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config kubectl config use-context kubernetes --kubeconfig=kube.config mkdir ~/.kube cp kube.config ~/.kube/config kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config
查看集群状态
export KUBECONFIG=$HOME/.kube/config kubectl cluster-info kubectl get componentstatuses kubectl get all --all-namespaces
同步kubectl配置文件到其他节点
scp -r ~/.kube/config masterIP:~/.kube/
配置kubectl子命令补全
yum install -y bash-completion source /usr/share/bash-completion/bash_completion source <(kubectl completion bash) kubectl completion bash > ~/.kube/completion.bash.inc source '/root/.kube/completion.bash.inc' source $HOME/.bash_profile
2.3.4 部署kube-controller-manager
创建csr请求文件
tee kube-controller-manager-csr.json <<-'EOF'
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"10.191.25.8",
"10.191.25.9",
"10.191.25.10"
],
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:kube-controller-manager",
"OU": "system"
}
]
}
EOF
sudo /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json |sudo /usr/local/bin/cfssljson -bare kube-controller-manager
创建kube-controller-manager的kube-controller-manager.kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.191.25.8:6443 --kubeconfig=kube-controller-manager.kubeconfig kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
创建配置文件kube-controller-manager.conf
sudo tee kube-controller-manager.conf <<-'EOF' KUBE_CONTROLLER_MANAGER_OPTS="--port=0 \ --secure-port=10257 \ --bind-address=127.0.0.1 \ --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \ --service-cluster-ip-range=10.96.0.0/16 \ --cluster-name=kubernetes \ --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \ --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \ --allocate-node-cidrs=true \ --cluster-cidr=172.168.0.0/16 \ --experimental-cluster-signing-duration=876000h \ --root-ca-file=/etc/kubernetes/ssl/ca.pem \ --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \ --leader-elect=true \ --feature-gates=RotateKubeletServerCertificate=true \ --controllers=*,bootstrapsigner,tokencleaner \ --horizontal-pod-autoscaler-sync-period=10s \ --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \ --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \ --use-service-account-credentials=true \ --alsologtostderr=true \ --logtostderr=false \ --log-dir=/var/log/kubernetes \ --v=2" EOF
创建启动文件
sudo tee /usr/lib/systemd/system/kube-controller-manager.service <<-'EOF' [Unit] Description=Kubernetes Controller Manager Documentation=https://github.com/kubernetes/kubernetes [Service] EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target EOF
拷贝证书和配置文件并同步到其他节点
sudo \cp -a kube-controller-manager*.pem /etc/kubernetes/ssl/ sudo \cp -a kube-controller-manager.kubeconfig kube-controller-manager.conf /etc/kubernetes/ #拷贝到其他节点 scp -r kube-controller-manager*.pem masterIP:/etc/kubernetes/ssl/ scp -r kube-controller-manager.kubeconfig kube-controller-manager.conf masterIP:/etc/kubernetes/
启动服务
systemctl daemon-reload systemctl enable --now kube-controller-manager systemctl status kube-controller-manager
2.3.5 部署kube-scheduler
创建csr请求文件
cat > kube-scheduler-csr.json << "EOF"
{
"CN": "system:kube-scheduler",
"hosts": [
"127.0.0.1",
"10.191.25.8",
"10.191.25.9",
"10.191.25.10"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:kube-scheduler",
"OU": "system"
}
]
}
EOF
sudo /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json |sudo /usr/local/bin/cfssljson -bare kube-scheduler
创建kube-scheduler的kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.191.25.8:6443 --kubeconfig=kube-scheduler.kubeconfig kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
创建配置文件
tee kube-scheduler.conf <<-'EOF' KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \ --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \ --leader-elect=true \ --alsologtostderr=true \ --logtostderr=false \ --log-dir=/var/log/kubernetes \ --v=2" EOF
创建服务启动文件
sudo tee /usr/lib/systemd/system/kube-scheduler.service <<-'EOF' [Unit] Description=Kubernetes Scheduler Documentation= [Service] EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target EOF
同步相关文件到各个节点
sudo \cp -a kube-scheduler*.pem /etc/kubernetes/ssl/ sudo \cp -a kube-scheduler.kubeconfig kube-scheduler.conf /etc/kubernetes/ #同步到其他节点 scp -r kube-scheduler*.pem masterIP:/etc/kubernetes/ssl/ scp -r kube-scheduler.kubeconfig kube-scheduler.conf masterIP:/etc/kubernetes/
启动服务
sudo systemctl daemon-reload sudo systemctl enable --now kube-scheduler sudo systemctl status kube-scheduler
2.4 安装docker节点
yum remove -y docker \ docker-client \ docker-client-latest \ docker-common \ docker-latest \ docker-latest-logrotate \ docker-logrotate \ docker-engine \ docker-ce* rm -rf /var/lib/docker wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.9.tgz tar xf docker-20.10.9.tgz sudo cp docker/* /usr/bin sudo tee /usr/lib/systemd/system/docker.service <<-'EOF' [Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com After=network-online.target firewalld.service Wants=network-online.target [Service] Type=notify ExecStart=/usr/bin/dockerd ExecReload=/bin/kill -s HUP $MAINPID LimitNOFILE=infinity LimitNPROC=infinity LimitCORE=infinity TimeoutStartSec=0 Delegate=yes KillMode=process Restart=on-failure StartLimitBurst=3 StartLimitInterval=60s [Install] WantedBy=multi-user.target EOF sudo systemctl daemon-reload sudo systemctl start docker sudo systemctl enable docker sudo systemctl status docker
添加加速器
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"exec-opts": [
"native.cgroupdriver=systemd"
],
"log-driver": "json-file",
"log-level": "warn",
"log-opts": {
"max-size": "1000m",
"max-file": "3"
},
"registry-mirrors": [
"https://36ueyfcw.mirror.aliyuncs.com"
],
"insecure-registries": [],
"selinux-enabled": false
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
2.4 部署kubelet
以下操作在master01上操作 创建kubelet-bootstrap.kubeconfig
BOOTSTRAP_TOKEN=$(sudo awk -F "," '{print $1}' /etc/kubernetes/token.csv)
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.191.25.8:6443 --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=~/.kube/config
创建配置文件
tee kubelet.json <<-'EOF'
{
"kind": "KubeletConfiguration",
"apiVersion": "kubelet.config.k8s.io/v1beta1",
"authentication": {
"x509": {
"clientCAFile": "/etc/kubernetes/ssl/ca.pem"
},
"webhook": {
"enabled": true,
"cacheTTL": "2m0s"
},
"anonymous": {
"enabled": false
}
},
"authorization": {
"mode": "Webhook",
"webhook": {
"cacheAuthorizedTTL": "5m0s",
"cacheUnauthorizedTTL": "30s"
}
},
"address": "10.191.25.8",
"port": 10250,
"readOnlyPort": 10255,
"cgroupDriver": "systemd",
"hairpinMode": "promiscuous-bridge",
"serializeImagePulls": false,
"clusterDomain": "cluster.local.",
"clusterDNS": ["10.96.0.2"]
}
EOF
clusterDNS的配置,后面配置coredns会用到
创建启动文件
sudo tee /usr/lib/systemd/system/kubelet.service <<-'EOF' [Unit] Description=Kubernetes Kubelet Documentation=https://github.com/kubernetes/kubernetes After=docker.service Requires=docker.service [Service] WorkingDirectory=/var/lib/kubelet ExecStart=/usr/local/bin/kubelet \ --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \ --cert-dir=/etc/kubernetes/ssl \ --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ --config=/etc/kubernetes/kubelet.json \ --network-plugin=cni \ --rotate-certificates \ --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \ --alsologtostderr=true \ --logtostderr=false \ --log-dir=/var/log/kubernetes \ --v=2 Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target EOF
同步相关文件到各个节点
sudo \cp -a kubelet.json kubelet-bootstrap.kubeconfig /etc/kubernetes/ #同步到其他节点 scp -r kubelet.json kubelet-bootstrap.kubeconfig masterIP:/etc/kubernetes/
在各个节点执行
sudo mkdir -p /var/lib/kubelet sudo mkdir -p /var/log/kubernetes sudo systemctl daemon-reload sudo systemctl enable --now kubelet sudo systemctl status kubelet
确认kubelet服务启动成功后,接着到master上Approve一下bootstrap请求。
kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve
查看一下node是否加入成功
kubectl get nodes
2.5 部署kube-proxy
创建csr请求文件
cat > kube-proxy-csr.json << "EOF"
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "system"
}
]
}
EOF
sudo /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json |sudo /usr/local/bin/cfssljson -bare kube-proxy
创建kubeconfig文件
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.191.25.8:6443 --kubeconfig=kube-proxy.kubeconfig kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
创建kube-proxy配置文件
cat > kube-proxy.yaml << "EOF" apiVersion: kubeproxy.config.k8s.io/v1alpha1 bindAddress: 10.191.25.8 clientConnection: kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig clusterCIDR: 172.168.0.0/16 healthzBindAddress: 10.191.25.8:10256 kind: KubeProxyConfiguration metricsBindAddress: 10.191.25.8:10249 mode: "ipvs" EOF
注意修改节点IP
创建服务启动文件
sudo tee /usr/lib/systemd/system/kube-proxy.service <<-'EOF' [Unit] Description=Kubernetes Kube-Proxy Server Documentation=https://github.com/kubernetes/kubernetes After=network.target [Service] WorkingDirectory=/var/lib/kube-proxy ExecStart=/usr/local/bin/kube-proxy \ --config=/etc/kubernetes/kube-proxy.yaml \ --alsologtostderr=true \ --logtostderr=false \ --log-dir=/var/log/kubernetes \ --v=2 Restart=on-failure RestartSec=5 LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
同步文件到各个节点
sudo \cp -a kube-proxy*.pem /etc/kubernetes/ssl/ sudo \cp -a kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/ #拷贝到其他节点 scp -r kube-proxy*.pem masterIP:/etc/kubernetes/ssl/ scp -r kube-proxy.kubeconfig kube-proxy.yaml masterIP:/etc/kubernetes/
启动服务
sudo mkdir -p /var/lib/kube-proxy sudo systemctl daemon-reload sudo systemctl enable --now kube-proxy systemctl status kube-proxy
2.6 部署网络组件
2.6.1 部署calico
wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml kubectl apply -f calico.yaml
查看状态,各个节点,均为Ready状态
$ kubectl get nodes NAME STATUS ROLES AGE VERSION wx-ceshi-oadj-1.novalocal Ready <none> 15m v1.22.7 wx-ceshi-oadj-2.novalocal Ready <none> 14m v1.22.7 wx-oa-dangjian-ceshi.novalocal Ready <none> 14m v1.22.7
2.6.2 部署coredns
cat > coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.8.4
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.96.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
kubectl apply -f coredns.yaml
clusterIP为:10.96.0.2(kubelet配置文件中的clusterDNS)
部署nginx验证
cat > nginx.yaml << "EOF" --- apiVersion: v1 kind: ReplicationController metadata: name: nginx-controller spec: replicas: 2 selector: name: nginx template: metadata: labels: name: nginx spec: containers: - name: nginx image: nginx:1.19.6 ports: - containerPort: 80 --- apiVersion: v1 kind: Service metadata: name: nginx-service-nodeport spec: ports: - port: 80 targetPort: 80 nodePort: 30001 protocol: TCP type: NodePort selector: name: nginx EOF kubectl apply -f nginx.yaml kubectl get svc kubectl get pods -o wide kubectl delete -f nginx.yaml

神回复
发表评论:
◎欢迎参与讨论,请在这里发表您的看法、交流您的观点。