2024 年云原生运维实战文档 99 篇原创计划 第 005 篇 |Docker 最佳实战「2024」系列 第 005 篇
你好,欢迎来到运维有术。
今天分享的内容是 Docker 最佳实战「2024」 系列文档中的 Docker 部署 etcd 集群实战。
内容导图
实战服务器配置 (架构 1:1 复刻小规模生产环境,配置略有不同)
主机名 | IP | CPU(核) | 内存(GB) | 系统盘(GB) | 数据盘(GB) | 用途 |
---|---|---|---|---|---|---|
docker-node-1 | 192.168.9.81 | 4 | 16 | 40 | 100 | Docker 节点 1 |
docker-node-2 | 192.168.9.82 | 4 | 16 | 40 | 100 | Docker 节点 2 |
docker-node-3 | 192.168.9.83 | 4 | 16 | 40 | 100 | Docker 节点 3 |
合计 | 3 | 12 | 48 | 120 | 300 |
实战环境涉及软件版本信息
上一期我们基于 coreos 官方提供的 Docker 镜像完成了单节点 etcd 部署实战。
本期我们继续基于配置文件和环境变量两种方式,完成 etcd 集群部署实战。
本节内容使用配置文件的方式实现 etcd 集群服务配置。
mkdir -p /data/containers/etcd/{data,config}
etcd 服务配置文件路径为:/data/containers/etcd/config/etcd.conf.yml
每个节点内容不一样,需要分别编写。
节点 1 的文件内容如下:
# etcd member configuration for node 1 (192.168.9.81)
name: etcd-1
# data directory inside the container
data-dir: /var/etcd
# accept client connections on all interfaces
listen-client-urls: http://0.0.0.0:2379
# client URL advertised to the rest of the cluster
advertise-client-urls: http://192.168.9.81:2379
# accept peer connections on all interfaces
listen-peer-urls: http://0.0.0.0:2380
# peer URL advertised to the other members
initial-advertise-peer-urls: http://192.168.9.81:2380
# full initial membership list; identical on every node
initial-cluster: etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
initial-cluster-token: etcd-cluster
# "new" because the cluster is bootstrapped for the first time
initial-cluster-state: new
logger: zap
log-level: info
#log-outputs: stderr
节点 2 的文件内容如下:
# etcd member configuration for node 2 (192.168.9.82)
name: etcd-2
# data directory inside the container
data-dir: /var/etcd
# accept client connections on all interfaces
listen-client-urls: http://0.0.0.0:2379
# client URL advertised to the rest of the cluster
advertise-client-urls: http://192.168.9.82:2379
# accept peer connections on all interfaces
listen-peer-urls: http://0.0.0.0:2380
# peer URL advertised to the other members
initial-advertise-peer-urls: http://192.168.9.82:2380
# full initial membership list; identical on every node
initial-cluster: etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
initial-cluster-token: etcd-cluster
# "new" because the cluster is bootstrapped for the first time
initial-cluster-state: new
logger: zap
log-level: info
#log-outputs: stderr
节点 3 的文件内容如下:
# etcd member configuration for node 3 (192.168.9.83)
name: etcd-3
# data directory inside the container
data-dir: /var/etcd
# accept client connections on all interfaces
listen-client-urls: http://0.0.0.0:2379
# client URL advertised to the rest of the cluster
advertise-client-urls: http://192.168.9.83:2379
# accept peer connections on all interfaces
listen-peer-urls: http://0.0.0.0:2380
# peer URL advertised to the other members
initial-advertise-peer-urls: http://192.168.9.83:2380
# full initial membership list; identical on every node
initial-cluster: etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
initial-cluster-token: etcd-cluster
# "new" because the cluster is bootstrapped for the first time
initial-cluster-state: new
logger: zap
log-level: info
#log-outputs: stderr
配置文件特殊配置项说明:
# docker-compose.yml — etcd driven by a mounted config file.
version: '3'
services:
  etcd:
    container_name: etcd
    image: quay.io/coreos/etcd:v3.5.12
    # all etcd settings come from the mounted etcd.conf.yml
    command: /usr/local/bin/etcd --config-file=/var/lib/etcd/conf/etcd.conf.yml
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/data:/var/etcd
      - ${DOCKER_VOLUME_DIRECTORY:-.}/config/etcd.conf.yml:/var/lib/etcd/conf/etcd.conf.yml
      # keep container time zone in sync with the host
      - "/etc/localtime:/etc/localtime:ro"
    ports:
      - 2379:2379
      - 2380:2380
    restart: always
networks:
  default:
    name: etcd-tier
    driver: bridge
所有节点都执行下面的命令,完成 etcd 集群服务的创建。
cd /data/containers/etcd
docker compose up -d
命令成功执行的结果如下 (以节点 1 为例):
[root@docker-node-1 etcd]# docker compose up -d
[+] Running 1/2
⠸ Network etcd-tier Created 0.4s
✔ Container etcd-s1 Started
[root@docker-node-1 etcd]# docker compose ps -a
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
etcd-1 quay.io/coreos/etcd:v3.5.12 "/usr/local/bin/etcd…" etcd 53 seconds ago Up 53 seconds 0.0.0.0:2379-2380->2379-2380/tcp, :::2379-2380->2379-2380/tcp
本节内容使用环境变量的方式实现 etcd 服务配置。
mkdir -p /data/containers/etcd/data
每个节点内容不一样,需要分别编写:vi /data/containers/etcd/docker-compose.yml
节点 1 的文件内容如下:
# docker-compose.yml for node 1 (192.168.9.81) — etcd driven by environment variables.
version: '3'
services:
  etcd:
    container_name: etcd-s1
    image: quay.io/coreos/etcd:v3.5.12
    environment:
      - ETCD_NAME=etcd-1
      - ETCD_DATA_DIR=/var/etcd
      - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
      - ETCD_ADVERTISE_CLIENT_URLS=http://192.168.9.81:2379
      - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
      - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://192.168.9.81:2380
      - ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
      - ETCD_INITIAL_CLUSTER=etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
      - ETCD_INITIAL_CLUSTER_STATE=new
      - ETCD_LOGGER=zap
      # etcd env vars are case-sensitive: must be all uppercase (was ETCD_LOG_LeveL)
      - ETCD_LOG_LEVEL=info
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/data:/var/etcd
      # keep container time zone in sync with the host
      - "/etc/localtime:/etc/localtime:ro"
    ports:
      - 2379:2379
      - 2380:2380
    restart: always
networks:
  default:
    name: etcd-tier
    driver: bridge
节点 2 的文件内容如下:
# docker-compose.yml for node 2 (192.168.9.82) — etcd driven by environment variables.
version: '3'
services:
  etcd:
    # was etcd-s1 (copy-paste error from node 1)
    container_name: etcd-s2
    image: quay.io/coreos/etcd:v3.5.12
    environment:
      # member name must be unique; was etcd-1 (copy-paste error), which
      # prevents this member from joining the cluster as etcd-2
      - ETCD_NAME=etcd-2
      - ETCD_DATA_DIR=/var/etcd
      - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
      - ETCD_ADVERTISE_CLIENT_URLS=http://192.168.9.82:2379
      - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
      - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://192.168.9.82:2380
      - ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
      - ETCD_INITIAL_CLUSTER=etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
      - ETCD_INITIAL_CLUSTER_STATE=new
      - ETCD_LOGGER=zap
      # etcd env vars are case-sensitive: must be all uppercase (was ETCD_LOG_LeveL)
      - ETCD_LOG_LEVEL=info
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/data:/var/etcd
      # keep container time zone in sync with the host
      - "/etc/localtime:/etc/localtime:ro"
    ports:
      - 2379:2379
      - 2380:2380
    restart: always
networks:
  default:
    name: etcd-tier
    driver: bridge
节点 3 的文件内容如下:
# docker-compose.yml for node 3 (192.168.9.83) — etcd driven by environment variables.
version: '3'
services:
  etcd:
    # was etcd-s1 (copy-paste error from node 1)
    container_name: etcd-s3
    image: quay.io/coreos/etcd:v3.5.12
    environment:
      # member name must be unique; was etcd-1 (copy-paste error), which
      # prevents this member from joining the cluster as etcd-3
      - ETCD_NAME=etcd-3
      - ETCD_DATA_DIR=/var/etcd
      - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
      - ETCD_ADVERTISE_CLIENT_URLS=http://192.168.9.83:2379
      - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
      - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://192.168.9.83:2380
      - ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
      - ETCD_INITIAL_CLUSTER=etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
      - ETCD_INITIAL_CLUSTER_STATE=new
      - ETCD_LOGGER=zap
      # etcd env vars are case-sensitive: must be all uppercase (was ETCD_LOG_LeveL)
      - ETCD_LOG_LEVEL=info
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/data:/var/etcd
      # keep container time zone in sync with the host
      - "/etc/localtime:/etc/localtime:ro"
    ports:
      - 2379:2379
      - 2380:2380
    restart: always
networks:
  default:
    name: etcd-tier
    driver: bridge
配置文件特殊配置项说明:
ETCD_INITIAL_ADVERTISE_PEER_URLS:本节点向集群其他成员通告的 peer 访问地址。
所有节点都执行下面的命令,完成 etcd 集群服务的创建。
cd /data/containers/etcd
docker compose up -d
[root@docker-node-1 etcd]# docker compose ps -a
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
etcd-1 quay.io/coreos/etcd:v3.5.12 "/usr/local/bin/etcd…" etcd 53 seconds ago Up 53 seconds 0.0.0.0:2379-2380->2379-2380/tcp, :::2379-2380->2379-2380/tcp
为了测试 etcd 服务的可用性,额外再找一台机器安装 etcd 客户端工具用于验证测试。
cd /srv
wget https://github.com/etcd-io/etcd/releases/download/v3.5.12/etcd-v3.5.12-linux-amd64.tar.gz
tar xvf etcd-v3.5.12-linux-amd64.tar.gz
cd etcd-v3.5.12-linux-amd64
# ./etcdctl --endpoints=192.168.9.81:2379,192.168.9.82:2379,192.168.9.83:2379 --write-out=table endpoint health
+-------------------+--------+-------------+-------+
| ENDPOINT | HEALTH | TOOK | ERROR |
+-------------------+--------+-------------+-------+
| 192.168.9.81:2379 | true | 6.698562ms | |
| 192.168.9.82:2379 | true | 20.894522ms | |
| 192.168.9.83:2379 | true | 21.585328ms | |
+-------------------+--------+-------------+-------+
# ./etcdctl --endpoints=192.168.9.81:2379,192.168.9.82:2379,192.168.9.83:2379 --write-out=table endpoint status
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 192.168.9.81:2379 | ee1780cf52566749 | 3.5.12 | 20 kB | true | false | 2 | 13 | 13 | |
| 192.168.9.82:2379 | 94e339f5d245a36b | 3.5.12 | 20 kB | false | false | 2 | 13 | 13 | |
| 192.168.9.83:2379 | 247803a54771eb27 | 3.5.12 | 20 kB | false | false | 2 | 13 | 13 | |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
# ./etcdctl --endpoints=192.168.9.81:2379,192.168.9.82:2379,192.168.9.83:2379 --write-out=table member list
+------------------+---------+--------+--------------------------+--------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+--------+--------------------------+--------------------------+------------+
| 247803a54771eb27 | started | etcd-3 | http://192.168.9.83:2380 | http://192.168.9.83:2379 | false |
| 94e339f5d245a36b | started | etcd-2 | http://192.168.9.82:2380 | http://192.168.9.82:2379 | false |
| ee1780cf52566749 | started | etcd-1 | http://192.168.9.81:2380 | http://192.168.9.81:2379 | false |
+------------------+---------+--------+--------------------------+--------------------------+------------+
# 写入数据
# ./etcdctl --endpoints=192.168.9.81:2379,192.168.9.82:2379,192.168.9.83:2379 put foo bar
OK
# 读取数据
# ./etcdctl --endpoints=192.168.9.81:2379,192.168.9.82:2379,192.168.9.83:2379 get foo
foo
bar
全文所有的操作,我都整理成了自动化脚本。
自动化脚本说明:
编写部署脚本 deploy-etcd-cluster-conf.sh
#!/bin/bash
# Deploy one etcd cluster member with Docker Compose, configured via an
# etcd YAML config file mounted into the container.
# author: @运维有术
# Usage: deploy-etcd-cluster-conf.sh [etcd_name] [client_ip] [initial_cluster] [base_dir]
set -euo pipefail

# etcd member name — must be unique per node
etcd_name=${1:-"etcd-1"}
# externally reachable IP of this node, advertised to clients and peers
etcd_client_ip=${2:-"0.0.0.0"}
# full initial cluster membership list — identical on every node
etcd_initial_cluster=${3:-"etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380"}
# base directory for container data and config (optional)
docker_container_dir=${4:-"/data/containers"}

# create the base directories
mkdir -p "${docker_container_dir}"/etcd/{data,config}

# Write the etcd configuration file for this member.
function deploy_etcd_config(){
cat > "${docker_container_dir}/etcd/config/etcd.conf.yml" <<EOF
name: ${etcd_name}
data-dir: /var/etcd
listen-client-urls: http://0.0.0.0:2379
advertise-client-urls: http://${etcd_client_ip}:2379
listen-peer-urls: http://0.0.0.0:2380
initial-advertise-peer-urls: http://${etcd_client_ip}:2380
initial-cluster: ${etcd_initial_cluster}
initial-cluster-token: etcd-cluster
initial-cluster-state: new
logger: zap
log-level: info
#log-outputs: stderr
EOF
}

# Write the docker-compose file.
# NOTE: \${DOCKER_VOLUME_DIRECTORY:-.} is backslash-escaped so the placeholder
# is written literally and resolved by docker compose at runtime instead of
# being expanded (and lost) while generating the file.
function deploy_compose_config(){
cat > "${docker_container_dir}/etcd/docker-compose.yml" <<EOF
version: '3'
services:
  etcd:
    container_name: ${etcd_name}
    image: quay.io/coreos/etcd:v3.5.12
    command: /usr/local/bin/etcd --config-file=/var/lib/etcd/conf/etcd.conf.yml
    volumes:
      - \${DOCKER_VOLUME_DIRECTORY:-.}/data:/var/etcd
      - \${DOCKER_VOLUME_DIRECTORY:-.}/config/etcd.conf.yml:/var/lib/etcd/conf/etcd.conf.yml
      - "/etc/localtime:/etc/localtime:ro"
    ports:
      - 2379:2379
      - 2380:2380
    restart: always
networks:
  default:
    name: etcd-tier
    driver: bridge
EOF
}

# Create the etcd service.
function deploy_etcd(){
  cd "${docker_container_dir}/etcd"
  docker compose up -d
}

# Verify the etcd service.
function check_etcd(){
  cd "${docker_container_dir}/etcd"
  docker compose ps
}

echo -e "\033[1;32m [1].Deploy etcd config.\n \033[0m"
deploy_etcd_config
echo -e "\033[1;32m [2].Deploy docker compose config.\n \033[0m"
deploy_compose_config
echo -e "\033[1;32m [3].Deploy etcd service.\n \033[0m"
deploy_etcd
echo -e "\033[1;32m [4].Check etcd service status. \033[0m"
check_etcd
使用脚本部署 etcd 集群。
./deploy-etcd-cluster-conf.sh etcd-1 192.168.9.81 etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
./deploy-etcd-cluster-conf.sh etcd-2 192.168.9.82 etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
./deploy-etcd-cluster-conf.sh etcd-3 192.168.9.83 etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
编写部署脚本 deploy-etcd-cluster-env.sh
#!/bin/bash
# Deploy one etcd cluster member with Docker Compose, configured entirely
# through ETCD_* environment variables.
# author: @运维有术
# Usage: deploy-etcd-cluster-env.sh [etcd_name] [client_ip] [initial_cluster] [base_dir]
set -euo pipefail

# etcd member name — must be unique per node
etcd_name=${1:-"etcd-1"}
# externally reachable IP of this node, advertised to clients and peers
etcd_client_ip=${2:-"0.0.0.0"}
# full initial cluster membership list — identical on every node
etcd_initial_cluster=${3:-"etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380"}
# base directory for container data (optional)
docker_container_dir=${4:-"/data/containers"}

# create the base directory
mkdir -p "${docker_container_dir}/etcd/data"

# Write the docker-compose file.
# NOTE: \${DOCKER_VOLUME_DIRECTORY:-.} is backslash-escaped so the placeholder
# is written literally and resolved by docker compose at runtime instead of
# being expanded (and lost) while generating the file.
# NOTE: etcd env vars are case-sensitive — ETCD_LOG_LEVEL must be uppercase
# (was ETCD_LOG_LeveL, which etcd silently ignores).
function deploy_compose_config(){
cat > "${docker_container_dir}/etcd/docker-compose.yml" <<EOF
version: '3'
services:
  etcd:
    container_name: ${etcd_name}
    image: quay.io/coreos/etcd:v3.5.12
    environment:
      - ETCD_NAME=${etcd_name}
      - ETCD_DATA_DIR=/var/etcd
      - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
      - ETCD_ADVERTISE_CLIENT_URLS=http://${etcd_client_ip}:2379
      - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
      - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://${etcd_client_ip}:2380
      - ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
      - ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
      - ETCD_INITIAL_CLUSTER_STATE=new
      - ETCD_LOGGER=zap
      - ETCD_LOG_LEVEL=info
    volumes:
      - \${DOCKER_VOLUME_DIRECTORY:-.}/data:/var/etcd
      - "/etc/localtime:/etc/localtime:ro"
    ports:
      - 2379:2379
      - 2380:2380
    restart: always
networks:
  default:
    name: etcd-tier
    driver: bridge
EOF
}

# Create the etcd service.
function deploy_etcd(){
  cd "${docker_container_dir}/etcd"
  docker compose up -d
}

# Verify the etcd service.
function check_etcd(){
  cd "${docker_container_dir}/etcd"
  docker compose ps
}

echo -e "\033[1;32m [1].Deploy docker compose config.\n \033[0m"
deploy_compose_config
echo -e "\033[1;32m [2].Deploy etcd service.\n \033[0m"
deploy_etcd
echo -e "\033[1;32m [3].Check etcd service status. \033[0m"
check_etcd
使用脚本部署 etcd 集群。
./deploy-etcd-cluster-env.sh etcd-1 192.168.9.81 etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
./deploy-etcd-cluster-env.sh etcd-2 192.168.9.82 etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
./deploy-etcd-cluster-env.sh etcd-3 192.168.9.83 etcd-1=http://192.168.9.81:2380,etcd-2=http://192.168.9.82:2380,etcd-3=http://192.168.9.83:2380
根据本文所学,请完成以下实战任务。
本文分享了基于 coreos 官方提供的 etcd 镜像部署 etcd 集群服务的详细流程及注意事项。主要内容概括如下:
Get 本文实战视频(请注意,文档视频异步发行,请先关注)
免责声明:
如果你喜欢本文,请分享、收藏、点赞、评论! 请持续关注 @ 运维有术,及时收看更多好文!
欢迎加入 「运维有术·云原生实战训练营」 ,获取更多的 KubeSphere、Kubernetes、云原生运维实战技能。
版权声明
原创声明:本文系作者授权腾讯云开发者社区发表,未经许可,不得转载。
如有侵权,请联系 cloudcommunity@tencent.com 删除。
原创声明:本文系作者授权腾讯云开发者社区发表,未经许可,不得转载。
如有侵权,请联系 cloudcommunity@tencent.com 删除。