# https://github.com/qist/k8s
#支持 Ubuntu 18及以上的系統(tǒng),CentOS7及CentOS8 系統(tǒng)
# k8s 版本 14,15,16,17 號(hào)版本
#Ubuntu 系列安裝
# Install Ansible — run only the command matching your distribution.
# Ubuntu:
apt -y install ansible
# CentOS 8:
dnf -y install ansible
# CentOS 7:
yum -y install ansible
# Tune the Ansible configuration.
# NOTE: id_rsa_storm1 is a site-specific private-key filename — change it to your own key.
sed -i 's/^#private_key_file =.*$/private_key_file =\/root\/.ssh\/id_rsa_storm1/g' /etc/ansible/ansible.cfg
# Uncomment these defaults so they are set explicitly.
sed -i 's/^#sudo_user = root/sudo_user = root/g' /etc/ansible/ansible.cfg
sed -i 's/^#remote_port = 22/remote_port = 22/g' /etc/ansible/ansible.cfg
sed -i 's/^#host_key_checking = False/host_key_checking = False/g' /etc/ansible/ansible.cfg
# Disable SSH ControlMaster multiplexing by appending ssh_args right after
# the [ssh_connection] section header.
sed -i '/\[ssh_connection\]/a\ssh_args = -o ControlMaster=no' /etc/ansible/ansible.cfg
# 配置 CFSSL 編譯環(huán)境。注意:跨系統(tǒng)部署時(shí)一定要在低版本系統(tǒng)上編譯(編譯會(huì)用到 lib 庫(kù),高版本上編譯的二進(jìn)制在低版本系統(tǒng)運(yùn)行需要升級(jí)庫(kù))
#Ubuntu 系列安裝
# Install the build toolchain — run only the command matching your distribution.
# Ubuntu:
apt -y install gcc git
# CentOS 8:
dnf -y install gcc git
# CentOS 7:
yum -y install gcc git
# Set up the Go build environment.
# Download the Go toolchain.
wget -P /usr/local/src/ https://dl.google.com/go/go1.12.14.linux-amd64.tar.gz
# Unpack into /usr/local (yields /usr/local/go).
tar -xf /usr/local/src/go1.12.14.linux-amd64.tar.gz -C /usr/local/
# Configure environment variables.
# NOTE(review): this appends on every run — re-running the setup duplicates
# these lines in /etc/profile; harmless but untidy.
cat << EOF >> /etc/profile
export GOPATH=/root/go
export GOBIN=/root/go/bin
PATH=\$PATH:/usr/local/go/bin:\$HOME/bin:\$GOBIN
export PATH
EOF
# Load the new environment into the current shell.
source /etc/profile
# 編譯 CFSSL
# Build CFSSL from source (binaries land in $GOBIN set above).
go get github.com/cloudflare/cfssl/cmd/cfssl
go get github.com/cloudflare/cfssl/cmd/cfssljson
# Verify cfssl installed successfully.
cfssl version
# Alternative: pre-built cfssl binaries (skip the go build above if using these).
wget -P /tmp/ https://github.com/qist/lxcfs/releases/download/cfssl/cfssl.tar.gz
# Unpack the downloaded archive straight into /usr/bin.
tar -xf /tmp/cfssl.tar.gz -C /usr/bin/
# Remove the downloaded archive.
rm -rf /tmp/cfssl.tar.gz
# Fetch the kubectl client (v1.14.10) for the control host.
wget -P /tmp/ https://storage.googleapis.com/kubernetes-release/release/v1.14.10/kubernetes-client-linux-amd64.tar.gz
# Unpack.
tar -xf /tmp/kubernetes-client-linux-amd64.tar.gz -C /tmp/
# Move kubectl into /usr/bin.
mv /tmp/kubernetes/client/bin/kubectl /usr/bin/
# Verify it runs.
kubectl version
# Clean up leftover files.
rm -rf /tmp/kubernetes-client-linux-amd64.tar.gz
rm -rf /tmp/kubernetes
# Fetch the deployment playbooks.
cd /opt
git clone https://github.com/qist/k8s.git
# 打開 kubernetes.v1.17.sh 查看下載路徑,版本可以自行選擇
# 創(chuàng)建壓縮包存放目錄
# Create the directory holding all downloaded tarballs.
mkdir /tmp/source
# Download everything the K8S cluster deployment needs.
# docker
wget -P /tmp/source https://download.docker.com/linux/static/stable/x86_64/docker-19.03.5.tgz
# lxcfs
wget -P /tmp/source https://github.com/qist/lxcfs/releases/download/3.1.2/lxcfs-3.1.2.tar.gz
# CNI plugins
wget -P /tmp/source https://github.com/containernetworking/plugins/releases/download/v0.8.3/cni-plugins-linux-amd64-v0.8.3.tgz
# etcd
wget -P /tmp/source https://github.com/etcd-io/etcd/releases/download/v3.3.18/etcd-v3.3.18-linux-amd64.tar.gz
# kubernetes server bundle
wget -P /tmp/source https://storage.googleapis.com/kubernetes-release/release/v1.17.0/kubernetes-server-linux-amd64.tar.gz
# haproxy
wget -P /tmp/source https://www.haproxy.org/download/2.1/src/haproxy-2.1.1.tar.gz
# automake (build dependency of keepalived)
wget -P /tmp/source https://ftp.gnu.org/gnu/automake/automake-1.15.1.tar.gz
# keepalived
wget -P /tmp/source https://www.keepalived.org/software/keepalived-2.0.19.tar.gz
# iptables (needed on CentOS 7 and Ubuntu 18)
wget -P /tmp/source https://www.netfilter.org/projects/iptables/files/iptables-1.6.2.tar.bz2
# 應(yīng)用部署目錄 可根據(jù)自己環(huán)境修改
# Deployment root — adjust to your environment.
TOTAL_PATH=/apps
ETCD_PATH=$TOTAL_PATH/etcd
# For large clusters keep data and WAL on separate disks (WAL preferably on SSD).
ETCD_DATA_DIR=$TOTAL_PATH/etcd/data/default.etcd
ETCD_WAL_DIR=$TOTAL_PATH/etcd/data/default.etcd
K8S_PATH=$TOTAL_PATH/k8s
POD_MANIFEST_PATH=$TOTAL_PATH/work
DOCKER_PATH=$TOTAL_PATH/docker
#DOCKER_BIN_PATH=$TOTAL_PATH/docker/bin # on Ubuntu 18 the docker binaries must live in /usr/bin
DOCKER_BIN_PATH=/usr/bin
CNI_PATH=$TOTAL_PATH/cni
SOURCE_PATH=/usr/local/src # source/tarball directory on the remote servers
KEEPALIVED_PATH=$TOTAL_PATH/keepalived
HAPROXY_PATH=$TOTAL_PATH/haproxy
# Working directory on the control host ($(...) instead of legacy backticks).
HOST_PATH=$(pwd)
# Directory holding the downloaded tarballs on the control host.
TEMP_PATH=/tmp/source
# Component versions.
ETCD_VERSION=v3.3.18
K8S_VERSION=v1.17.0
LXCFS_VERSION=3.1.2
DOCKER_VERSION=19.03.5
CNI_VERSION=v0.8.3
IPTABLES_VERSION=1.6.2 # CentOS 7 / Ubuntu 18 need the upgrade; CentOS 8 / Ubuntu 19 do not
KEEPALIVED_VERSION=2.0.19
AUTOMAKE_VERSION=1.15.1 # build dependency of keepalived
HAPROXY_VERSION=2.1.1
# Network plugin: 1 = kube-router, 2 = kube-proxy + flannel.
# With kube-router, same-subnet external-ip traffic needs an extra route;
# kube-proxy reaches it directly.
NET_PLUG=1
# Inter-node network interface (flannel binds to this interface).
IFACE="eth0"
# K8S apiserver interconnect address fact; for multi-NIC hosts use ansible_<nic>.ipv4.address
API_IPV4=ansible_default_ipv4.address
# kubelet pod interconnect; single NIC: ansible_default_ipv4.address,
# multi-NIC: ansible_${IFACE}.ipv4.address
KUBELET_IPV4=ansible_default_ipv4.address
# Certificate subject fields.
CERT_ST="GuangDong"
CERT_L="GuangZhou"
CERT_O="k8s"
CERT_OU="Qist"
CERT_PROFILE="kubernetes"
# Certificate lifetime (also used by kube-controller-manager when signing).
EXPIRY_TIME="87600h"
# etcd key prefix used by the K8S apiserver.
ETCD_PREFIX="/registry"
# etcd cluster members.
#ETCD_SERVER_HOSTNAMES="\"k8s-master-01\",\"k8s-master-02\",\"k8s-master-03\""
#ETCD_SERVER_IPS="\"192.168.2.247\",\"192.168.2.248\",\"192.168.2.249\""
ETCD_MEMBER_1_IP="192.168.2.247"
ETCD_MEMBER_1_HOSTNAMES="k8s-master-01"
ETCD_MEMBER_2_IP="192.168.2.248"
ETCD_MEMBER_2_HOSTNAMES="k8s-master-02"
ETCD_MEMBER_3_IP="192.168.2.249"
ETCD_MEMBER_3_HOSTNAMES="k8s-master-03"
ETCD_SERVER_HOSTNAMES="\"${ETCD_MEMBER_1_HOSTNAMES}\",\"${ETCD_MEMBER_2_HOSTNAMES}\",\"${ETCD_MEMBER_3_HOSTNAMES}\""
ETCD_SERVER_IPS="\"${ETCD_MEMBER_1_IP}\",\"${ETCD_MEMBER_2_IP}\",\"${ETCD_MEMBER_3_IP}\""
# Peer URLs for intra-cluster etcd communication.
INITIAL_CLUSTER="${ETCD_MEMBER_1_HOSTNAMES}=https://${ETCD_MEMBER_1_IP}:2380,${ETCD_MEMBER_2_HOSTNAMES}=https://${ETCD_MEMBER_2_IP}:2380,${ETCD_MEMBER_3_HOSTNAMES}=https://${ETCD_MEMBER_3_IP}:2380"
# Client endpoint list.
ENDPOINTS=https://${ETCD_MEMBER_1_IP}:2379,https://${ETCD_MEMBER_2_IP}:2379,https://${ETCD_MEMBER_3_IP}:2379
# Store K8S events in a dedicated etcd cluster: 1 = enabled, 0 = disabled (default).
K8S_EVENTS=0
# Quoted POSIX '=' comparison: the original unquoted '==' is a bashism that
# breaks under /bin/sh (dash) and errors when the variable is empty.
if [ "${K8S_EVENTS}" = "1" ]; then
# events etcd cluster members
#ETCD_EVENTS_HOSTNAMES="\"k8s-node-01\",\"k8s-node-02\",\"k8s-node-03\""
#ETCD_EVENTS_IPS="\"192.168.2.250\",\"192.168.2.251\",\"192.168.2.252\""
ETCD_EVENTS_MEMBER_1_IP="192.168.2.250"
ETCD_EVENTS_MEMBER_1_HOSTNAMES="k8s-node-01"
ETCD_EVENTS_MEMBER_2_IP="192.168.2.251"
ETCD_EVENTS_MEMBER_2_HOSTNAMES="k8s-node-02"
ETCD_EVENTS_MEMBER_3_IP="192.168.2.252"
ETCD_EVENTS_MEMBER_3_HOSTNAMES="k8s-node-03"
ETCD_EVENTS_HOSTNAMES="\"${ETCD_EVENTS_MEMBER_1_HOSTNAMES}\",\"${ETCD_EVENTS_MEMBER_2_HOSTNAMES}\",\"${ETCD_EVENTS_MEMBER_3_HOSTNAMES}\""
ETCD_EVENTS_IPS="\"${ETCD_EVENTS_MEMBER_1_IP}\",\"${ETCD_EVENTS_MEMBER_2_IP}\",\"${ETCD_EVENTS_MEMBER_3_IP}\""
# Peer URLs for the events cluster.
INITIAL_EVENTS_CLUSTER="${ETCD_EVENTS_MEMBER_1_HOSTNAMES}=https://${ETCD_EVENTS_MEMBER_1_IP}:2380,${ETCD_EVENTS_MEMBER_2_HOSTNAMES}=https://${ETCD_EVENTS_MEMBER_2_IP}:2380,${ETCD_EVENTS_MEMBER_3_HOSTNAMES}=https://${ETCD_EVENTS_MEMBER_3_IP}:2380"
ENDPOINTS="${ENDPOINTS} --etcd-servers-overrides=/events#https://${ETCD_EVENTS_MEMBER_1_IP}:2379;https://${ETCD_EVENTS_MEMBER_2_IP}:2379;https://${ETCD_EVENTS_MEMBER_3_IP}:2379"
fi
#是否開啟docker0 網(wǎng)卡 參數(shù): doakcer0 none k8s集群建議不用開啟,單獨(dú)部署請(qǐng)?jiān)O(shè)置值為docker0
# docker0 bridge: "none" disables it (recommended inside a K8S cluster);
# use "docker0" for a standalone docker deployment.
NET_BRIDGE="none"
# K8S cluster parameters.
# Prefer currently-unused ranges for the service and pod networks.
# Service CIDR: unroutable before deploy; routed inside the cluster by kube-proxy.
SERVICE_CIDR="10.66.0.0/16"
# Pod CIDR (a /12 is suggested): routed inside the cluster by the network plugin.
CLUSTER_CIDR="10.80.0.0/12"
# NodePort range.
NODE_PORT_RANGE="30000-65535"
# kubernetes service IP (normally the first IP of SERVICE_CIDR).
CLUSTER_KUBERNETES_SVC_IP="10.66.0.1"
# Cluster name.
CLUSTER_NAME=kubernetes
# Cluster DNS domain.
CLUSTER_DNS_DOMAIN="cluster.local"
# Cluster DNS service IP.
CLUSTER_DNS_SVC_IP="10.66.0.2"
# keepalived VIPs for the apiserver.
K8S_VIP_1_IP="192.168.3.12"
K8S_VIP_2_IP="192.168.3.13"
K8S_VIP_3_IP="192.168.3.14"
K8S_VIP_DOMAIN="api.k8s.niuke.tech"
# kube-apiserver listen port.
SECURE_PORT=5443
# Port exposed on the VIP (set when a VIP is configured).
K8S_VIP_PORT=6443
# apiserver URL used by all kubeconfigs.
KUBE_APISERVER="https://${K8S_VIP_DOMAIN}:${K8S_VIP_PORT}"
# RUNTIME_CONFIG for v1.16+; below v1.16 "api/all=true" also works.
RUNTIME_CONFIG="api/all=true"
# Enabled admission plugins. NOTE: AlwaysPullImages breaks istio auto-injection
# (manual injection required), hence it is not enabled here.
ENABLE_ADMISSION_PLUGINS="DefaultStorageClass,DefaultTolerationSeconds,LimitRanger,NamespaceExists,NamespaceLifecycle,NodeRestriction,OwnerReferencesPermissionEnforcement,PodNodeSelector,PersistentVolumeClaimResize,PodPreset,PodTolerationRestriction,ResourceQuota,ServiceAccount,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook"
# Disabled admission plugins.
DISABLE_ADMISSION_PLUGINS="DenyEscalatingExec,ExtendedResourceToleration,ImagePolicyWebhook,LimitPodHardAntiAffinityTopology,NamespaceAutoProvision,Priority,EventRateLimit,PodSecurityPolicy"
# Number of apiserver replicas.
APISERVER_COUNT="3"
# Log verbosity.
LEVEL_LOG="2"
# Max in-flight mutating API requests.
MAX_MUTATING_REQUESTS_INFLIGHT="500"
# Max in-flight non-mutating API requests.
MAX_REQUESTS_INFLIGHT="1500"
# Memory sizing vs node count, in MB: target-ram-mb = node_nums * 60.
TARGET_RAM_MB="6000"
# kube-api-qps (default 50).
KUBE_API_QPS="100"
# kube-api-burst (default 30).
KUBE_API_BURST="100"
# pause image used for pod sandboxes.
POD_INFRA_CONTAINER_IMAGE="docker.io/juestnow/pause-amd64:3.1"
# Maximum pods per node.
MAX_PODS=100
# Random key for the apiserver EncryptionConfig.
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
# kube-apiserver node IPs — add more when the cluster has more masters, e.g.
# K8S_APISERVER_VIP="\"192.168.2.247\",\"192.168.2.248\",\"192.168.2.249\",\"192.168.2.250\",\"192.168.2.251\""
K8S_APISERVER_VIP="\"192.168.2.247\",\"192.168.2.248\",\"192.168.2.249\""
# Bootstrap token for kubelet TLS bootstrapping (id.secret format).
TOKEN_ID=$(head -c 6 /dev/urandom | md5sum | head -c 6)
TOKEN_SECRET=$(head -c 16 /dev/urandom | md5sum | head -c 16)
BOOTSTRAP_TOKEN=${TOKEN_ID}.${TOKEN_SECRET}
# 修改完成保存
# 到git clone 目錄
# Create a per-version build directory inside the git clone.
# -p makes the step idempotent on re-runs (plain mkdir fails if it exists).
mkdir -p k8s.v17.0
# Abort if we cannot enter the directory — otherwise the generator would run
# (and scatter its output) in the wrong place.
cd k8s.v17.0 || exit 1
cp ../kubernetes.v1.17.sh ./
# Generate certificates, kubeconfigs and the ansible roles.
bash kubernetes.v1.17.sh
# 查看生成數(shù)據(jù)
[root@k8s-node-09 k8s.v17.0]# ls
README.md cni.yml environment.sh haproxy.yml keepalived.yml kube-controller-manager.yml kubeconfig kubernetes.v1.17.sh package.yml yaml
cfssl docker.yml etcd.yml iptables.yml kube-apiserver.yml kube-scheduler.yml kubelet.yml lxcfs.yml roles
[root@k8s-node-09 k8s.v17.0]# tree
.
├── README.md
├── cfssl
│?? ├── ca-config.json
│?? ├── etcd
│?? │?? ├── etcd-ca-csr.json
│?? │?? ├── etcd-client.json
│?? │?? ├── etcd-server.json
│?? │?? ├── k8s-master-01.json
│?? │?? ├── k8s-master-02.json
│?? │?? └── k8s-master-03.json
│?? ├── k8s
│?? │?? ├── aggregator.json
│?? │?? ├── k8s-apiserver-admin.json
│?? │?? ├── k8s-apiserver.json
│?? │?? ├── k8s-ca-csr.json
│?? │?? ├── k8s-controller-manager.json
│?? │?? ├── k8s-scheduler.json
│?? │?? └── kube-router.json
│?? └── pki
│?? ├── etcd
│?? │?? ├── etcd-ca-key.pem
│?? │?? ├── etcd-ca.csr
│?? │?? ├── etcd-ca.pem
│?? │?? ├── etcd-client-key.pem
│?? │?? ├── etcd-client.csr
│?? │?? ├── etcd-client.pem
│?? │?? ├── etcd-member-k8s-master-01-key.pem
│?? │?? ├── etcd-member-k8s-master-01.csr
│?? │?? ├── etcd-member-k8s-master-01.pem
│?? │?? ├── etcd-member-k8s-master-02-key.pem
│?? │?? ├── etcd-member-k8s-master-02.csr
│?? │?? ├── etcd-member-k8s-master-02.pem
│?? │?? ├── etcd-member-k8s-master-03-key.pem
│?? │?? ├── etcd-member-k8s-master-03.csr
│?? │?? ├── etcd-member-k8s-master-03.pem
│?? │?? ├── etcd-server-key.pem
│?? │?? ├── etcd-server.csr
│?? │?? └── etcd-server.pem
│?? └── k8s
│?? ├── aggregator-key.pem
│?? ├── aggregator.csr
│?? ├── aggregator.pem
│?? ├── k8s-apiserver-admin-key.pem
│?? ├── k8s-apiserver-admin.csr
│?? ├── k8s-apiserver-admin.pem
│?? ├── k8s-ca-key.pem
│?? ├── k8s-ca.csr
│?? ├── k8s-ca.pem
│?? ├── k8s-controller-manager-key.pem
│?? ├── k8s-controller-manager.csr
│?? ├── k8s-controller-manager.pem
│?? ├── k8s-scheduler-key.pem
│?? ├── k8s-scheduler.csr
│?? ├── k8s-scheduler.pem
│?? ├── k8s-server-key.pem
│?? ├── k8s-server.csr
│?? ├── k8s-server.pem
│?? ├── kube-router-key.pem
│?? ├── kube-router.csr
│?? └── kube-router.pem
├── cni.yml
├── docker.yml
├── environment.sh
├── etcd.yml
├── haproxy.yml
├── iptables.yml
├── keepalived.yml
├── kube-apiserver.yml
├── kube-controller-manager.yml
├── kube-scheduler.yml
├── kubeconfig
│?? ├── admin.kubeconfig
│?? ├── bootstrap.kubeconfig
│?? ├── kube-controller-manager.kubeconfig
│?? ├── kube-router.kubeconfig
│?? └── kube-scheduler.kubeconfig
├── kubelet.yml
├── kubernetes.v1.17.sh
├── lxcfs.yml
├── package.yml
├── roles
│?? ├── cni
│?? │?? ├── files
│?? │?? │?? └── bin
│?? │?? │?? ├── bandwidth
│?? │?? │?? ├── bridge
│?? │?? │?? ├── dhcp
│?? │?? │?? ├── firewall
│?? │?? │?? ├── flannel
│?? │?? │?? ├── host-device
│?? │?? │?? ├── host-local
│?? │?? │?? ├── ipvlan
│?? │?? │?? ├── loopback
│?? │?? │?? ├── macvlan
│?? │?? │?? ├── portmap
│?? │?? │?? ├── ptp
│?? │?? │?? ├── sbr
│?? │?? │?? ├── static
│?? │?? │?? ├── tuning
│?? │?? │?? └── vlan
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? ├── docker
│?? │?? ├── files
│?? │?? │?? └── bin
│?? │?? │?? ├── containerd
│?? │?? │?? ├── containerd-shim
│?? │?? │?? ├── ctr
│?? │?? │?? ├── docker
│?? │?? │?? ├── docker-init
│?? │?? │?? ├── docker-proxy
│?? │?? │?? ├── dockerd
│?? │?? │?? └── runc
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? │?? ├── containerd.service
│?? │?? ├── daemon.json
│?? │?? ├── docker.service
│?? │?? └── docker.socket
│?? ├── etcd
│?? │?? ├── files
│?? │?? │?? ├── bin
│?? │?? │?? │?? ├── etcd
│?? │?? │?? │?? └── etcdctl
│?? │?? │?? └── ssl
│?? │?? │?? ├── etcd-ca-key.pem
│?? │?? │?? ├── etcd-ca.pem
│?? │?? │?? ├── etcd-client-key.pem
│?? │?? │?? ├── etcd-client.pem
│?? │?? │?? ├── etcd-member-k8s-master-01-key.pem
│?? │?? │?? ├── etcd-member-k8s-master-01.pem
│?? │?? │?? ├── etcd-member-k8s-master-02-key.pem
│?? │?? │?? ├── etcd-member-k8s-master-02.pem
│?? │?? │?? ├── etcd-member-k8s-master-03-key.pem
│?? │?? │?? ├── etcd-member-k8s-master-03.pem
│?? │?? │?? ├── etcd-server-key.pem
│?? │?? │?? └── etcd-server.pem
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? │?? ├── etcd
│?? │?? └── etcd.service
│?? ├── haproxy
│?? │?? ├── files
│?? │?? │?? └── haproxy-2.1.1.tar.gz
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? │?? ├── 49-haproxy.conf
│?? │?? ├── haproxy
│?? │?? ├── haproxy.conf
│?? │?? └── haproxy.service
│?? ├── iptables
│?? │?? ├── files
│?? │?? │?? └── iptables-1.6.2.tar.bz2
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? ├── keepalived
│?? │?? ├── files
│?? │?? │?? ├── automake-1.15.1.tar.gz
│?? │?? │?? └── keepalived-2.0.19.tar.gz
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? │?? ├── keepalived
│?? │?? ├── keepalived.conf
│?? │?? └── keepalived.service
│?? ├── kube-apiserver
│?? │?? ├── files
│?? │?? │?? ├── bin
│?? │?? │?? │?? └── kube-apiserver
│?? │?? │?? ├── config
│?? │?? │?? │?? ├── audit-policy.yaml
│?? │?? │?? │?? └── encryption-config.yaml
│?? │?? │?? └── ssl
│?? │?? │?? ├── etcd
│?? │?? │?? │?? ├── etcd-ca.pem
│?? │?? │?? │?? ├── etcd-client-key.pem
│?? │?? │?? │?? └── etcd-client.pem
│?? │?? │?? └── k8s
│?? │?? │?? ├── aggregator-key.pem
│?? │?? │?? ├── aggregator.pem
│?? │?? │?? ├── k8s-ca.pem
│?? │?? │?? ├── k8s-server-key.pem
│?? │?? │?? └── k8s-server.pem
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? │?? ├── kube-apiserver
│?? │?? └── kube-apiserver.service
│?? ├── kube-controller-manager
│?? │?? ├── files
│?? │?? │?? ├── bin
│?? │?? │?? │?? └── kube-controller-manager
│?? │?? │?? └── ssl
│?? │?? │?? └── k8s
│?? │?? │?? ├── k8s-ca-key.pem
│?? │?? │?? ├── k8s-ca.pem
│?? │?? │?? ├── k8s-controller-manager-key.pem
│?? │?? │?? └── k8s-controller-manager.pem
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? │?? ├── kube-controller-manager
│?? │?? ├── kube-controller-manager.kubeconfig
│?? │?? └── kube-controller-manager.service
│?? ├── kube-scheduler
│?? │?? ├── files
│?? │?? │?? └── bin
│?? │?? │?? └── kube-scheduler
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? │?? ├── kube-scheduler
│?? │?? ├── kube-scheduler.kubeconfig
│?? │?? └── kube-scheduler.service
│?? ├── kubelet
│?? │?? ├── files
│?? │?? │?? ├── bin
│?? │?? │?? │?? └── kubelet
│?? │?? │?? └── ssl
│?? │?? │?? └── k8s
│?? │?? │?? └── k8s-ca.pem
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? │?? ├── bootstrap.kubeconfig
│?? │?? ├── kubelet
│?? │?? └── kubelet.service
│?? ├── lxcfs
│?? │?? ├── files
│?? │?? │?? ├── lib
│?? │?? │?? │?? └── lxcfs
│?? │?? │?? │?? ├── liblxcfs.la
│?? │?? │?? │?? └── liblxcfs.so
│?? │?? │?? ├── lxcfs
│?? │?? │?? └── lxcfs.service
│?? │?? ├── tasks
│?? │?? │?? └── main.yml
│?? │?? └── templates
│?? └── package
│?? ├── files
│?? ├── tasks
│?? │?? └── main.yml
│?? └── templates
└── yaml
├── allow-lxcfs-tz-env.yaml
├── bootstrap-secret.yaml
├── kube-api-rbac.yaml
├── kube-router.yaml
└── kubelet-bootstrap-rbac.yaml
75 directories, 179 files
## 特別說明: keepalived 部署 必須單個(gè)部署 三節(jié)點(diǎn)部署
# keepalived must be deployed one node at a time. ROUTER_ID is unique per node;
# the HAx_ID values are the VRRP priorities. Three-node example:
ansible-playbook -i 192.168.2.247, keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA1 -e HA1_ID=100 -e HA2_ID=110 -e HA3_ID=120 -e STATE_3=MASTER
ansible-playbook -i 192.168.2.248, keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA2 -e HA1_ID=110 -e HA2_ID=120 -e HA3_ID=100 -e STATE_2=MASTER
ansible-playbook -i 192.168.2.249, keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA3 -e HA1_ID=120 -e HA2_ID=100 -e HA3_ID=110 -e STATE_1=MASTER
# More than three nodes (five-node example):
ansible-playbook -i "192.168.2.247", keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA1 -e HA1_ID=100 -e HA2_ID=110 -e HA3_ID=140 -e STATE_3=MASTER
ansible-playbook -i "192.168.2.248", keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA2 -e HA1_ID=110 -e HA2_ID=140 -e HA3_ID=130 -e STATE_2=MASTER
ansible-playbook -i "192.168.2.249", keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA3 -e HA1_ID=140 -e HA2_ID=100 -e HA3_ID=120 -e STATE_1=MASTER
ansible-playbook -i "192.168.2.250", keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA4 -e HA1_ID=130 -e HA2_ID=120 -e HA3_ID=110
ansible-playbook -i "192.168.2.251", keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA5 -e HA1_ID=120 -e HA2_ID=130 -e HA3_ID=100
# 安裝按說明文件執(zhí)行就 OK。node 節(jié)點(diǎn)安裝 IP 很多時(shí)可以寫入文件,例如:
# Create an inventory file named "node" containing one target IP per line:
vi node
192.168.2.165
192.168.2.167
192.168.2.189
192.168.2.196
192.168.2.247
192.168.2.248
192.168.2.249
192.168.2.250
192.168.2.251
192.168.2.252
192.168.2.253
# Then run playbooks against the inventory file:
ansible-playbook -i node xxx.yml
[root@k8s-node-09 k8s.v17.0]# cat README.md
########## mkdir -p /root/.kube
##########復(fù)制admin kubeconfig 到root用戶作為kubectl 工具默認(rèn)密鑰文件
########## \cp -pdr /opt/k8s/k8s.v17.0/kubeconfig/admin.kubeconfig /root/.kube/config
###################################################################################
########## ansible 及 ansible-playbook 單個(gè) ip,ip 結(jié)尾一定要添加“,”符號(hào) ansible-playbook -i 192.168.0.1, xxx.yml
########## source /opt/k8s/k8s.v17.0/environment.sh 設(shè)置環(huán)境變量生效方便后期新增證書等
########## etcd 部署 ansible-playbook -i "192.168.2.247","192.168.2.248","192.168.2.249" etcd.yml
########## etcd EVENTS 部署 ansible-playbook -i , events-etcd.yml
########## kube-apiserver 部署 ansible-playbook -i "192.168.2.247","192.168.2.248","192.168.2.249", kube-apiserver.yml
########## haproxy 部署 ansible-playbook -i "192.168.2.247","192.168.2.248","192.168.2.249", haproxy.yml
########## keepalived 節(jié)點(diǎn)IP "192.168.2.247","192.168.2.248","192.168.2.249" 安裝keepalived使用IP 如果大于三個(gè)節(jié)點(diǎn)安裝keepalived 記得HA1_ID 唯一的也就是priority的值
########## keepalived 也可以全部部署為BACKUP STATE_x 可以使用默認(rèn)值 IFACE 網(wǎng)卡名字默認(rèn)eth0 ROUTER_ID 全局唯一ID HA1_ID為priority值
########## keepalived 部署 節(jié)點(diǎn)1 ansible-playbook -i 節(jié)點(diǎn)ip1, keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA1 -e HA1_ID=100 -e HA2_ID=110 -e HA3_ID=120 -e STATE_3=MASTER
########## keepalived 部署 節(jié)點(diǎn)2 ansible-playbook -i 節(jié)點(diǎn)ip2, keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA2 -e HA1_ID=110 -e HA2_ID=120 -e HA3_ID=100 -e STATE_2=MASTER
########## keepalived 部署 節(jié)點(diǎn)3 ansible-playbook -i 節(jié)點(diǎn)ip3, keepalived.yml -e IFACE=eth0 -e ROUTER_ID=HA3 -e HA1_ID=120 -e HA2_ID=100 -e HA3_ID=110 -e STATE_1=MASTER
########## kube-controller-manager kube-scheduler ansible-playbook -i "192.168.2.247","192.168.2.248","192.168.2.249", kube-controller-manager.yml kube-scheduler.yml
########## 部署完成驗(yàn)證集群 kubectl cluster-info kubectl api-versions kubectl get cs 1.16 kubectl 顯示不正常
########## 提交bootstrap 跟授權(quán)到K8S 集群 kubectl apply -f /opt/k8s/k8s.v17.0/yaml/bootstrap-secret.yaml
########## 提交授權(quán)到K8S集群 kubectl apply -f /opt/k8s/k8s.v17.0/yaml/kubelet-bootstrap-rbac.yaml kubectl apply -f /opt/k8s/k8s.v17.0/yaml/kube-api-rbac.yaml
########## 系統(tǒng)版本為centos7 或者 ubuntu18 請(qǐng)先升級(jí) iptables ansible-playbook -i 要安裝node ip列表, iptables.yml
########## 安裝K8S node 使用kube-router ansible部署 ansible-playbook -i 要安裝node ip列表 package.yml lxcfs.yml docker.yml kubelet.yml
########## 安裝K8S node 使用 flannel 網(wǎng)絡(luò)插件ansible部署ansible-playbook -i 要安裝node ip列表 package.yml lxcfs.yml docker.yml kubelet.yml kube-proxy.yml
########## 部署自動(dòng)掛載日期與lxcfs 到pod的 PodPreset kubectl apply -f /opt/k8s/k8s.v17.0/yaml/allow-lxcfs-tz-env.yaml -n kube-system " kube-system 命名空間名字"PodPreset 只是當(dāng)前空間生效所以需要每個(gè)命名空間執(zhí)行
########## 查看node 節(jié)點(diǎn)是否注冊(cè)到K8S kubectl get node kubectl get csr 如果有節(jié)點(diǎn) kube-router 方式部署 kubectl apply -f /opt/k8s/k8s.v17.0/yaml/kube-router.yaml 等待容器部署完成查看node ip a | grep kube-bridge
########## flannel 網(wǎng)絡(luò)插件部署 kubectl apply -f /opt/k8s/k8s.v17.0/yaml/flannel.yaml 等待容器部署完成查看node 節(jié)點(diǎn)網(wǎng)絡(luò) ip a| grep flannel.1
########## 給 master ingress 添加污點(diǎn) 防止其它服務(wù)使用這些節(jié)點(diǎn):kubectl taint nodes k8s-master-01 node-role.kubernetes.io/master=:NoSchedule kubectl taint nodes k8s-ingress-01 node-role.kubernetes.io/ingress=:NoSchedule
########## calico 網(wǎng)絡(luò)插件部署 50節(jié)點(diǎn)內(nèi) wget https://docs.projectcalico.org/v3.10/manifests/calico.yaml 大于50節(jié)點(diǎn) wget https://docs.projectcalico.org/v3.10/manifests/calico-typha.yaml
########## 如果cni配置沒放到默認(rèn)路徑請(qǐng)創(chuàng)建軟鏈 ln -s /apps/cni/etc /etc/cni 同時(shí)修改yaml hostPath路徑 同時(shí)修改CALICO_IPV4POOL_CIDR 參數(shù)為 10.80.0.0/12 CALICO_IPV4POOL_IPIP: Never 啟用bgp模式
########## windows 證書訪問 openssl pkcs12 -export -inkey k8s-apiserver-admin-key.pem -in k8s-apiserver-admin.pem -out client.p12
########## kubectl proxy --port=8001 & 把kube-apiserver 端口映射成本地 8001 端口
########## 查看kubelet節(jié)點(diǎn)配置信息 NODE_NAME="k8s-node-04"; curl -sSL "http://localhost:8001/api/v1/nodes/${NODE_NAME}/proxy/configz" | jq '.kubeletconfig|.kind="KubeletConfiguration"|.apiVersion="kubelet.config.k8s.io/v1beta1"' > kubelet_configz_${NODE_NAME}
# 生效環(huán)境變量
# Load the generated environment (HOST_PATH, CERT_* fields, CERT_PROFILE, ...).
source /opt/k8s/k8s.v17.0/environment.sh
# Write a CSR definition for a new kubernetes-dashboard certificate.
cat << EOF | tee ${HOST_PATH}/cfssl/k8s/kubernetes-dashboard.json
{
  "CN": "kubernetes-dashboard",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "$CERT_ST",
      "L": "$CERT_L",
      "O": "$CERT_O",
      "OU": "$CERT_OU"
    }
  ]
}
EOF
# Sign the CSR with the cluster CA; writes kubernetes-dashboard.pem /
# kubernetes-dashboard-key.pem / kubernetes-dashboard.csr in the current directory.
cfssl gencert \
  -ca=${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem \
  -ca-key=${HOST_PATH}/cfssl/pki/k8s/k8s-ca-key.pem \
  -config=${HOST_PATH}/cfssl/ca-config.json \
  -profile=${CERT_PROFILE} \
  ${HOST_PATH}/cfssl/k8s/kubernetes-dashboard.json | \
  cfssljson -bare ./kubernetes-dashboard
# Tear down a cluster deployed by these playbooks.
# Delete all workloads first so kubelet stops recreating containers.
kubectl delete deployments --all -A
kubectl delete daemonsets --all -A
kubectl delete statefulsets --all -A
# All cluster services, in shutdown order.
services="kubelet docker kube-proxy containerd kube-scheduler kube-controller-manager kube-apiserver etcd haproxy keepalived"
# Stop every service on all inventory hosts.
for svc in ${services}; do
  ansible -i node all -m shell -a "systemctl stop ${svc}"
done
ansible -i node all -m shell -a "umount /apps/docker/root/netns/default"
# Adjust the path to your environment; if docker shares the tree with other
# applications you can simply delete the whole directory.
for target in "/apps/*" /etc/cni /etc/docker /etc/containerd; do
  ansible -i node all -m shell -a "rm -rf ${target}"
done
for bin in "/usr/bin/docker*" "/usr/bin/containerd*" /usr/bin/ctr /usr/bin/runc; do
  ansible -i node all -m shell -a "rm -f ${bin}"
done
# Disable boot-time start for the same services.
for svc in ${services}; do
  ansible -i node all -m shell -a "systemctl disable ${svc}"
done
# Remove the systemd unit files.
for unit in kubelet.service docker.service docker.socket kube-proxy.service containerd.service kube-scheduler.service kube-controller-manager.service kube-apiserver.service etcd.service haproxy.service keepalived.service; do
  ansible -i node all -m shell -a "rm -f /lib/systemd/system/${unit}"
done
另外有需要云服務(wù)器可以了解下創(chuàng)新互聯(lián)cdcxhl.cn,海內(nèi)外云服務(wù)器15元起步,三天無理由+7*72小時(shí)售后在線,公司持有idc許可證,提供“云服務(wù)器、裸金屬服務(wù)器、高防服務(wù)器、香港服務(wù)器、美國(guó)服務(wù)器、虛擬主機(jī)、免備案服務(wù)器”等云主機(jī)租用服務(wù)以及企業(yè)上云的綜合解決方案,具有“安全穩(wěn)定、簡(jiǎn)單易用、服務(wù)可用性高、性價(jià)比高”等特點(diǎn)與優(yōu)勢(shì),專為企業(yè)上云打造定制,能夠滿足用戶豐富、多元化的應(yīng)用場(chǎng)景需求。
創(chuàng)新互聯(lián)建站從2013年成立,是專業(yè)互聯(lián)網(wǎng)技術(shù)服務(wù)公司,擁有項(xiàng)目成都做網(wǎng)站、成都網(wǎng)站制作網(wǎng)站策劃,項(xiàng)目實(shí)施與項(xiàng)目整合能力。我們以讓每一個(gè)夢(mèng)想脫穎而出為使命,1280元達(dá)坂城做網(wǎng)站,已為上家服務(wù),為達(dá)坂城各地企業(yè)和個(gè)人服務(wù),聯(lián)系電話:18980820575
分享文章:kubernetes二進(jìn)制部署ansibleplaybook一鍵生成-創(chuàng)新互聯(lián)
文章網(wǎng)址:http://www.rwnh.cn/article24/hdpje.html
成都網(wǎng)站建設(shè)公司_創(chuàng)新互聯(lián),為您提供面包屑導(dǎo)航、網(wǎng)站維護(hù)、小程序開發(fā)、動(dòng)態(tài)網(wǎng)站、軟件開發(fā)、用戶體驗(yàn)
聲明:本網(wǎng)站發(fā)布的內(nèi)容(圖片、視頻和文字)以用戶投稿、用戶轉(zhuǎn)載內(nèi)容為主,如果涉及侵權(quán)請(qǐng)盡快告知,我們將會(huì)在第一時(shí)間刪除。文章觀點(diǎn)不代表本網(wǎng)站立場(chǎng),如需處理請(qǐng)聯(lián)系客服。電話:028-86922220;郵箱:631063699@qq.com。內(nèi)容未經(jīng)允許不得轉(zhuǎn)載,或轉(zhuǎn)載時(shí)需注明來源: 創(chuàng)新互聯(lián)
猜你還喜歡下面的內(nèi)容