服务器版本: CentOS Linux 7 (Core)
内核版本: 3.10.0-957.el7.x86_64 (uname -r 的输出, 并非docker版本)
1.先删除原有的docker安装包
yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine
2.安装yum-utils
yum install -y yum-utils device-mapper-persistent-data lvm2
3.添加yum安装docker的仓库
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
4.列举可安装的docker版本
yum list docker-ce --showduplicates | sort -r
5.docker根据版本进行安装
yum install docker-ce-20.10.6-3.el7 docker-ce-cli-20.10.6-3.el7 containerd.io
6.启动docker
systemctl start docker
7.设置开机启动
systemctl enable docker
8.docker swarm集群
# docker swarm 初始化
docker swarm init --advertise-addr 192.168.174.133
# 添加worker工作节点
docker swarm join --token SWMTKN-1-2c3erdp3im7mprf3mzwg9dtmtikh3hpr2e983v3gnqspnmcnxk-8svvr6kmudsec1sdx28tnvc9q 192.168.174.133:2377
docker swarm join --token SWMTKN-1-3dfq5giu5f06f5k28marl00gnl6r01xmvet79dcwlkhoong8ju-8srgafjq4l3zlomz24jjnl0pv 192.168.100.11:2377
# 查看docker节点列表
docker node ls
# 停止接单
docker node update --availability drain lzlg01
# 重新启动节点
docker node update --availability active lzlg01
# 查看服务状态
docker service ps helloworld
# 查看服务状态
docker service inspect --pretty helloworld
# 扩展服务集群
docker service scale helloworld=2
# 删除服务
docker service rm helloworld
9.配置镜像下载节点
tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://ppdui2ld.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload
systemctl restart docker
# 查看yum已安装的包
yum list installed | grep docker
10.docker容器重启配置
# 如果在第一次启动的时候, 没有配置--restart=always参数, 则可通过下面命令进行添加
docker container update --restart=always 容器名字
# 也可修改容器相关的json配置文件
# 配置默认文件路径为: /var/lib/docker/containers/容器ID
# 在该目录下找到一个文件 hostconfig.json, 找到该文件中关键字 RestartPolicy
# 修改前配置:"RestartPolicy":{"Name":"no","MaximumRetryCount":0}
# 修改后配置:"RestartPolicy":{"Name":"always","MaximumRetryCount":0}
redis版本6.0.16, 一主二从配置
# 下载redis镜像
docker pull redis:6.0.16
# 创建目录
mkdir /var/redis
# 把redis_master.conf放在主redis的服务器上
# 把redis_slave.conf放在从redis的服务器上
redis_master.conf
daemonize no
pidfile "/var/run/redis.pid"
port 6379
timeout 300
loglevel warning
logfile "redis.log"
databases 16
rdbcompression yes
dbfilename "redis.rdb"
dir "/data"
requirepass unionlive
masterauth unionlive
maxclients 10000
maxmemory 1024mb
maxmemory-policy allkeys-lru
appendonly no
protected-mode no
redis_slave.conf
daemonize no
pidfile "/var/run/redis.pid"
port 6379
timeout 300
loglevel warning
logfile "redis.log"
databases 16
rdbcompression yes
dbfilename "redis.rdb"
dir "/data"
requirepass unionlive
masterauth unionlive
maxclients 10000
maxmemory 1024mb
maxmemory-policy allkeys-lru
appendonly no
protected-mode no
appendfsync always
slaveof 192.168.199.200 6379
启动master
docker run -d --name redis_master -p 6379:6379 -v /var/redis:/data --restart=always redis:6.0.16 redis-server redis_master.conf
启动slave
docker run -d --name redis_slave -p 6379:6379 -v /var/redis:/data --restart=always redis:6.0.16 redis-server redis_slave.conf
哨兵配置文件sentinel.conf
daemonize no
protected-mode no
port 26379
pidfile "/var/run/redis.pid"
dir "/data"
sentinel monitor mymaster 192.168.199.200 6379 1
sentinel down-after-milliseconds mymaster 60000
sentinel auth-pass mymaster unionlive
sentinel config-epoch mymaster 1
sentinel leader-epoch mymaster 1
启动哨兵
docker run -d --name sentinel -p 26379:26379 -v /var/sentinel:/data --restart=always redis:6.0.16 redis-sentinel sentinel.conf
nginx版本1.20.1
docker安装(不推荐)
# 下载nginx镜像
docker pull nginx:1.20.1
# 先启动一次
docker run -d -p 80:80 --name nginx nginx:1.20.1
# 把容器中的nginx.conf复制出来(注意: 需先执行下面的创建挂载目录命令, 复制的目标目录才存在)
docker cp nginx:/etc/nginx/nginx.conf /var/nginx/conf/
docker cp nginx:/etc/nginx/conf.d/default.conf /var/nginx/conf/conf.d/
# 创建挂载目录
mkdir /var/nginx/html
mkdir /var/nginx/conf
mkdir /var/nginx/logs
mkdir /var/nginx/conf/conf.d
# 启动容器
docker run -d -p 80:80 -v /var/nginx/conf/nginx.conf:/etc/nginx/nginx.conf -v /var/nginx/conf/conf.d:/etc/nginx/conf.d -v /var/nginx/logs:/var/log/nginx -v /var/nginx/html:/usr/share/nginx/html --name nginx nginx:1.20.1
# 修改配置文件后重新加载nginx配置(官方nginx镜像内无service命令, 使用nginx -s reload)
docker exec -it nginx nginx -s reload
使用安装包安装
# 下载nginx
wget http://nginx.org/download/nginx-1.20.1.tar.gz
# 安装依赖包
yum -y install gcc make pcre-devel openssl-devel
# 解压tar包
tar -zxvf nginx-1.20.1.tar.gz
# 安装指定目录和模块
./configure --prefix=/var/nginx --with-http_gunzip_module --with-http_ssl_module
# 编译并安装(注意是&&, 单个&会把make放入后台并立即执行make install)
make && make install
# 设置环境变量
vim /etc/profile
# 添加以下内容
export PATH=/var/nginx/sbin:$PATH
# 变更生效
source /etc/profile
# 查看环境变量是否生效
echo $PATH
# 测试安装是否成功
nginx -v
# 启动nginx
nginx -c 配置文件路径
常见nginx命令
# 快速关闭nginx服务
nginx -s stop
# 优雅的关闭,优雅是指当一个请求被处理完成之后才被关闭
nginx -s quit
# 重新加载配置
nginx -s reload
设置开机启动
# 进入系统启动目录
cd /lib/systemd/system/
# 创建nginx.service文件
vim nginx.service
# 文件内容
[Unit]
Description=nginx service
After=network.target
[Service]
Type=forking
ExecStart=/var/nginx/sbin/nginx -c /var/nginx/conf/nginx.conf
ExecReload=/var/nginx/sbin/nginx -s reload
ExecStop=/var/nginx/sbin/nginx -s quit
PrivateTmp=true
[Install]
WantedBy=multi-user.target
# 加入开机自启动
systemctl enable nginx
# 取消开机启动
systemctl disable nginx
# systemctl start nginx.service 启动nginx服务
# systemctl stop nginx.service 停止服务
# systemctl restart nginx.service 重新启动服务
# systemctl list-units --type=service 查看所有已启动的服务
# systemctl status nginx.service 查看服务当前状态
# systemctl enable nginx.service 设置开机自启动
# systemctl disable nginx.service 停止开机自启动
jenkins版本最新的,老版本安装插件失败, 注意要在docker swarm的manager节点进行操作
# 下载jenkins镜像
docker pull jenkins/jenkins
# 创建目录
mkdir /var/jenkins_home
# 启动jenkins容器,8080是页面端口,50000是通信端口
docker run -d -p 8080:8080 -p 50000:50000 -v /var/jenkins_home:/var/jenkins_home -v /usr/bin/docker:/usr/bin/docker -v /var/run/docker.sock:/var/run/docker.sock --restart=always --network cluster --name jenkins jenkins/jenkins
# 解决jenkins目录权限问题
chown -R 1000 /var/jenkins_home/
# 配置镜像加速
cd /var/jenkins_home
vim hudson.model.UpdateCenter.xml
https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json
jenkins账户/密码: admin/1qaz2wsx
jenkins重启: 在url上添加restart
http://192.168.174.133:8080/restart
配置jenkins插件
安装GitLab, Gitlab Authentication, Maven, JDK版本改为8
使用nacos需配置数据库nacos, 数据库表文件:nacos-mysql.sql
nacos版本1.2.0
nacos集群模式
# 拉取镜像
docker pull nacos/nacos-server:1.2.0
# 创建Nacos的工作目录
mkdir -p /var/nacos-server
mkdir -p /var/nacos-server/env
mkdir -p /var/nacos-server/logs
mkdir -p /var/nacos-server/init.d
# 添加custom.properties
vim /var/nacos-server/init.d/custom.properties
custom.properties文件内容:
#spring.security.enabled=false
#management.security=false
#security.basic.enabled=false
#nacos.security.ignore.urls=/
#management.metrics.export.elastic.host=http://localhost:9200
# metrics for prometheus
management.endpoints.web.exposure.include=*
# metrics for elastic search
#management.metrics.export.elastic.enabled=false
#management.metrics.export.elastic.host=http://localhost:9200
# metrics for influx
#management.metrics.export.influx.enabled=false
#management.metrics.export.influx.db=springboot
#management.metrics.export.influx.uri=http://localhost:8086
#management.metrics.export.influx.auto-create-db=true
#management.metrics.export.influx.consistency=one
#management.metrics.export.influx.compressed=true
添加nacos-hostname.env
# 文件路径
vim /var/nacos-server/env/nacos-hostname.env
# 首选主机模式
PREFER_HOST_MODE=hostname
# 当前主机的IP
NACOS_SERVER_IP=192.168.174.133
# 集群的各个节点
NACOS_SERVERS=192.168.174.133:8848 192.168.174.134:8848 192.168.174.135:8848
# 数据库的配置
MYSQL_SERVICE_HOST=192.168.174.133
MYSQL_SERVICE_DB_NAME=nacos
MYSQL_SERVICE_PORT=3306
MYSQL_SERVICE_USER=root
MYSQL_SERVICE_PASSWORD=lzlg520llx
# 从节点 这里就使用单节点测试,因此就不配置从节点
#MYSQL_SLAVE_SERVICE_HOST=xxx
#MYSQL_SLAVE_SERVICE_PORT=3306
# JVM参数 默认是2G 如果使用虚拟机,内存没有2G,就需要调整这里的参数,否则将无法启动
JVM_XMS=256m
JVM_XMX=256m
JVM_XMN=256m
传输文件到其他机器
scp nacos-hostname.env root@192.168.174.134:/var/nacos-server/env
scp custom.properties root@192.168.174.134:/var/nacos-server/init.d
启动docker容器
docker run -p 8848:8848 --restart=always --name nacos-server --env-file=/var/nacos-server/env/nacos-hostname.env -v /var/nacos-server/logs:/home/nacos/logs -v /var/nacos-server/init.d/custom.properties:/home/nacos/init.d/custom.properties -d nacos/nacos-server:1.2.0
# 1.生成密钥,密钥路径在/root/.ssh
ssh-keygen -t rsa
# 2.A的公钥(id_rsa.pub)拷贝到B,并修改id_rsa.pub为authorized_keys
scp id_rsa.pub root@192.168.174.134:/root/.ssh/authorized_keys
# 3.多个服务器,则把各自的公钥文件内容追加到authorized_keys中
cat /root/.ssh/a.pub >> /root/.ssh/authorized_keys
# 拉取registry镜像
docker pull registry
# 启动私有仓库,注意这里要添加自定义域名
docker run -d -p 5000:5000 --restart=always --network cluster --name registry registry
# 访问链接
http://192.168.174.133:5000/v2/_catalog
# 修改daemon.json,让docker信任私有仓库地址
vim /etc/docker/daemon.json
# 添加以下内容
{
"registry-mirrors": ["https://62oh6osg.mirror.aliyuncs.com"],
"insecure-registries":["192.168.174.133:5000"],
"log-driver":"json-file",
"log-opts":{ "max-size":"100m" }
}
# 重启docker服务
systemctl restart docker
# 设置容器总是重启
docker container update --restart=always 容器名字
Docker网络配置(swarm集群跨主机容器通信使用overlay网络):
# 创建网络
docker network create -d overlay --attachable cluster
# 查看网络列表
docker network ls
# 以root用户进入容器
docker exec -it -u root 容器id /bin/bash
# 限制docker使用内存
# 将memory-swap 设置值为 -1,表示容器程序使用内存受限,而 swap 空间使用不受限制。
docker update --memory 512m --memory-swap -1 jenkins
# 使用rabbitmq 3.8版本
docker pull rabbitmq:3.8
mkdir /var/rabbitmq
# 启动docker容器,用户tang 密码1qaz@WSX 内存限制1G,超出内存后可用交换空间1G(2G减去1G)
# 目录挂载/var/rabbitmq
# 添加新端口:61613
docker run -d --name rabbitmq --restart=always -e RABBITMQ_DEFAULT_USER=tang -e RABBITMQ_DEFAULT_PASS=1qaz@WSX -v /var/rabbitmq:/var/lib/rabbitmq --memory="1g" --memory-swap="2g" -p 15672:15672 -p 5672:5672 -p 61613:61613 rabbitmq:3.8
# 进入容器,启动插件
docker exec -it rabbitmq /bin/bash
# 启动管理界面插件
rabbitmq-plugins enable rabbitmq_management
# 进入容器,防止出现[访问channel时报错]: stats in management ui are disabled on this node
cd etc/rabbitmq/conf.d/
# 查看配置文件内容
more management_agent.disable_metrics_collector.conf
# 修改配置文件信息
echo management_agent.disable_metrics_collector = false > management_agent.disable_metrics_collector.conf
# 重启容器
docker restart rabbitmq
# 15672是管理界面端口, 5672是访问端口
# RabbitMQ启用stomp协议
rabbitmq-plugins enable rabbitmq_stomp
rabbitmq-plugins enable rabbitmq_web_stomp
# 创建es网络
docker network create es-net
# 拉取es镜像
docker pull elasticsearch:7.12.1
# 运行es
docker run -d \
--name es \
--restart=always \
-e "ES_JAVA_OPTS=-Xms512m -Xmx512m" \
-e "discovery.type=single-node" \
-v es-data:/usr/share/elasticsearch/data \
-v es-plugins:/usr/share/elasticsearch/plugins \
--privileged \
--network es-net \
-p 9200:9200 \
-p 9300:9300 \
elasticsearch:7.12.1
# 拉取kibana,注意版本要和es的一致
docker pull kibana:7.12.1
# 运行es
docker run -d \
--name kibana \
--restart=always \
-e ELASTICSEARCH_HOSTS=http://es:9200 \
--network=es-net \
-p 5601:5601 \
kibana:7.12.1
# 配置ik[中文]分词器
docker volume inspect es-plugins
结果:
[
{
"CreatedAt": "2022-05-06T10:06:34+08:00",
"Driver": "local",
"Labels": null,
"Mountpoint": "/var/lib/docker/volumes/es-plugins/_data",
"Name": "es-plugins",
"Options": null,
"Scope": "local"
}
]
# 将ik解压,导入到_data下
# 重启es
docker restart es
使用的mysql镜像版本是5.7
主机IP: 192.168.174.160 端口: 33306 路径: /root/mysql/master
从机IP: 192.168.174.160 端口: 33309 路径: /root/mysql/slave
# master主机my.cnf (文件路径: /root/mysql/master/conf/my.cnf)
[client]
default_character_set=utf8
[mysqld]
## 数据库字符集编码
collation_server=utf8_general_ci
character_set_server=utf8
## 设置server_id,同一局域网中需要唯一
server_id=520
## 指定不需要同步的数据库名称
binlog-ignore-db=mysql
binlog-ignore-db=information_schema
## 开启二进制日志功能
log-bin=master-binlog
## 设置二进制日志使用内存大小(事务)
binlog_cache_size=5M
## 设置使用的二进制日志格式(mixed,statement,row)
binlog_format=mixed
## 二进制日志过期清理天数,默认值为0,表示不自动清理.
#expire_logs_days=0
## 跳过主从复制中遇到的所有错误或指定类型的错误,避免slave端复制中断.
## 如: 1062错误是指一些主键重复, 1032错误是因为主从数据库数据不一致
slave_skip_errors=1062
# slave主机my.cnf (文件路径: /root/mysql/slave/conf/my.cnf)
[client]
default_character_set=utf8
[mysqld]
## 数据库字符集编码
collation_server=utf8_general_ci
character_set_server=utf8
## 设置server_id,同一局域网中需要唯一
server_id=521
## 指定不需要同步的数据库名称
binlog-ignore-db=mysql
binlog-ignore-db=information_schema
## 开启二进制日志功能,以备Slave作为其它数据库实例的Master时使用
log-bin=slave-bin-log
## 设置二进制日志使用内存大小(事务)
binlog_cache_size=5M
## 设置使用的二进制日志格式(mixed,statement,row)
binlog_format=mixed
## 二进制日志过期清理天数.默认值为0,表示不自动清理.
#expire_logs_days=0
## 跳过主从复制中遇到的所有错误或指定类型的错误,避免slave端复制中断.
## 如: 1062错误是指一些主键重复, 1032错误是因为主从数据库数据不一致
slave_skip_errors=1062
## relay_log配置中继日志
relay_log=slave-relay-bin
## log_slave_updates表示slave将复制事件写进自己的二进制日志
log_slave_updates=1
## slave设置为只读(具有super权限的用户除外)
read_only=1
# 先启动master主机
docker run -p 33306:3306 --name mysql-master \
-v /root/mysql/master/log:/var/log/mysql \
-v /root/mysql/master/data:/var/lib/mysql \
-v /root/mysql/master/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=123456 \
-d mysql:5.7
# 进入master的mysql,创建slave用户,并授权
CREATE USER 'slave'@'%' IDENTIFIED BY '123456';
GRANT REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'slave'@'%';
# 创建master的my.cnf文件并重启master容器
# 记录master的状态信息
show master status;
# binlog名称: master-binlog.000001 位置: 154
# 启动slave主机
docker run -p 33309:3306 --name mysql-slave \
-v /root/mysql/slave/log:/var/log/mysql \
-v /root/mysql/slave/data:/var/lib/mysql \
-v /root/mysql/slave/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=123456 \
-d mysql:5.7
# 创建slave的my.cnf文件并重启slave容器
# 进入slave的mysql,配置主从复制
change master to master_host='192.168.174.160', master_user='slave', master_password='123456', master_port=33306, master_log_file='master-binlog.000001', master_log_pos=154, master_connect_retry=30;
# 查看从机状态
show slave status \G
# 开启主从复制
start slave;
# 执行show slave status \G 看其中的Slave_IO_Running和Slave_SQL_Running是否都是Yes
三台机器(133, 134, 135), 一台扩容/缩容机器(160), redis的docker版本为6.0.8
# 在133, 134, 135机器上创建6个redis节点
# 133
docker run -d --name redis-node-1 --net host --privileged=true -v /root/redis-cluster/node-1:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 16391
docker run -d --name redis-node-2 --net host --privileged=true -v /root/redis-cluster/node-2:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 16392
# 134
docker run -d --name redis-node-3 --net host --privileged=true -v /root/redis-cluster/node-3:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 16391
docker run -d --name redis-node-4 --net host --privileged=true -v /root/redis-cluster/node-4:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 16392
# 135
docker run -d --name redis-node-5 --net host --privileged=true -v /root/redis-cluster/node-5:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 16391
docker run -d --name redis-node-6 --net host --privileged=true -v /root/redis-cluster/node-6:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 16392
# 进入容器中,进行集群配置
docker exec -it redis-node-1 /bin/bash
# 集群创建命令
redis-cli --cluster create 192.168.174.133:16391 192.168.174.133:16392 192.168.174.134:16391 192.168.174.134:16392 192.168.174.135:16391 192.168.174.135:16392 --cluster-replicas 1
# 进入redis命令行查看集群状态
cluster info # 查看集群信息
cluster nodes # 查看集群节点
# 可以看到以下集群节点信息
3022692110c27bc250f433a3878eb460701e46b3 192.168.174.134:16392@26392 slave 8c75c99669fcea103d748859e7c4ce33956815cc 0 1670325788835 1 connected
d9407ef20d7e7852d3b88d5cd739c415aee26be1 192.168.174.135:16392@26392 slave 60ffc8502fd7eef50932c1b40f475c8649a42b79 0 1670325786000 3 connected
d81653bc99f183312c68768d40cddcba2004e4cd 192.168.174.133:16392@26392 slave 46d6da06a49cde19ef5b3ca7a3bc9cf69a86c489 0 1670325786799 5 connected
60ffc8502fd7eef50932c1b40f475c8649a42b79 192.168.174.134:16391@26391 master - 0 1670325786000 3 connected 5461-10922
46d6da06a49cde19ef5b3ca7a3bc9cf69a86c489 192.168.174.135:16391@26391 master - 0 1670325787820 5 connected 10923-16383
8c75c99669fcea103d748859e7c4ce33956815cc 192.168.174.133:16391@26391 myself,master - 0 1670325787000 1 connected 0-5460
# 由此集群构建成功
redis-cli --cluster check 192.168.174.133:16391 # 检查集群状态
# 在160机器上创建两个redis节点
docker run -d --name redis-node-7 --net host --privileged=true -v /root/redis-cluster/node-7:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 16391
docker run -d --name redis-node-8 --net host --privileged=true -v /root/redis-cluster/node-8:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 16392
# 添加主节点到集群中
redis-cli --cluster add-node 192.168.174.160:16391 192.168.174.133:16391
# 检查集群状态
redis-cli --cluster check 192.168.174.133:16391
# 可看到已经添加一个master节点,但无slots(槽位)
192.168.174.160:16391 (0af3e913...) -> 0 keys | 0 slots | 0 slaves.
# 重新划分槽位
redis-cli --cluster reshard 192.168.174.133:16391
# 选择all从所有的节点中,划分4096个槽位给160,注意节点的uuid不要粘贴错误
M: 0af3e9136c1d638712838147822f8eea764e0149 192.168.174.160:16391
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
# 添加160的从节点到主节点上
redis-cli --cluster add-node 192.168.174.160:16392 192.168.174.160:16391 --cluster-slave --cluster-master-id 0af3e9136c1d638712838147822f8eea764e0149
# 进入redis命令行,可看到192.168.174.160:16392已是192.168.174.160:16391的从节点
cluster nodes # 查看集群节点
422a59c6b012901823a85b39aa30a38ed1df4dca 192.168.174.160:16392@26392 slave 0af3e9136c1d638712838147822f8eea764e0149 0 1670326672000 7 connected
3022692110c27bc250f433a3878eb460701e46b3 192.168.174.134:16392@26392 slave 8c75c99669fcea103d748859e7c4ce33956815cc 0 1670326673000 1 connected
d9407ef20d7e7852d3b88d5cd739c415aee26be1 192.168.174.135:16392@26392 slave 60ffc8502fd7eef50932c1b40f475c8649a42b79 0 1670326673531 3 connected
0af3e9136c1d638712838147822f8eea764e0149 192.168.174.160:16391@26391 master - 0 1670326674549 7 connected 0-1364 5461-6826 10923-12287
d81653bc99f183312c68768d40cddcba2004e4cd 192.168.174.133:16392@26392 slave 46d6da06a49cde19ef5b3ca7a3bc9cf69a86c489 0 1670326671000 5 connected
60ffc8502fd7eef50932c1b40f475c8649a42b79 192.168.174.134:16391@26391 master - 0 1670326672510 3 connected 6827-10922
46d6da06a49cde19ef5b3ca7a3bc9cf69a86c489 192.168.174.135:16391@26391 master - 0 1670326668000 5 connected 12288-16383
8c75c99669fcea103d748859e7c4ce33956815cc 192.168.174.133:16391@26391 myself,master - 0 1670326671000 1 connected 1365-5460
# 由此redis扩容完成
# 缩容基本上是把扩容步骤倒过来操作一遍
# 先删除160上的从节点
redis-cli --cluster del-node 192.168.174.160:16392 422a59c6b012901823a85b39aa30a38ed1df4dca
# 再次检查集群状态,可看到从节点已经被删除
redis-cli --cluster check 192.168.174.133:16391
# 再次进行分配槽位,这里要把160机器上的槽位全部转移到133这台机器上
redis-cli --cluster reshard 192.168.174.133:16391
# 160主节点id: 0af3e9136c1d638712838147822f8eea764e0149
# 133主节点id: 8c75c99669fcea103d748859e7c4ce33956815cc
# 再次检查集群状态
redis-cli --cluster check 192.168.174.133:16391 # 检查集群状态
# 可看到160主节点已经无槽位,且已经转移到133主节点上
192.168.174.160:16391 (0af3e913...) -> 0 keys | 0 slots | 0 slaves.
192.168.174.133:16391 (8c75c996...) -> 0 keys | 8192 slots | 1 slaves.
# 删除160主节点
redis-cli --cluster del-node 192.168.174.160:16391 0af3e9136c1d638712838147822f8eea764e0149
# 再次检查集群状态,可看到已经没有160主节点信息
redis-cli --cluster check 192.168.174.133:16391 # 检查集群状态
# 由此缩容完成