This example uses 9 Linux virtual machines to try out a full-topology TiDB cluster and walks through deployment steps that mimic a production environment.
Instance | Count | IP Address | Configuration |
---|---|---|---|
TiProxy | 1 | 192.168.68.81 | Default ports and other default settings |
TiDB | 2 | 192.168.68.82-83 | Default ports and other default settings |
PD | 3 | 192.168.68.84-86 | Default ports and other default settings |
TiKV | 3 | 192.168.68.87-89 | Default ports and other default settings (one instance per host, so no port conflicts) |
TiFlash | 1 | 192.168.68.81 | Default ports and other default settings |
Monitor | 1 | 192.168.68.81 | Default ports and other default settings |
Lab environment
Control machine: 192.168.68.81
TiProxy: 192.168.68.81
TiDB: 192.168.68.82, 192.168.68.83
PD: 192.168.68.84, 192.168.68.85, 192.168.68.86
TiKV: 192.168.68.87, 192.168.68.88, 192.168.68.89
Operating system: RockyLinux-9.5
Preparation
Perform the following preparation steps as root on every machine in the cluster.
Install the required tools
yum -y install numa* hdparm tar
Disable the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
systemctl status firewalld.service
Disable swap
swapoff -a
free -h
# Comment out the swap mount entry in /etc/fstab
vi /etc/fstab
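If you prefer a non-interactive edit instead of opening vi, one common way (a sketch; check the result before rebooting) is to comment the swap line with sed:
# Comment out the swap entry in /etc/fstab non-interactively (keeps a .bak backup)
sed -i.bak '/\bswap\b/ s/^[^#]/#&/' /etc/fstab
# Confirm: any remaining swap lines should start with #
grep swap /etc/fstab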
Disable THP (Transparent Huge Pages)
# Check whether THP is currently enabled
[root@localhost ~]# cat /sys/kernel/mm/transparent_hugepage/defrag
[always] madvise never
[root@localhost ~]# cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never
vi /etc/rc.d/rc.local
# Add the following lines
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
# Make rc.local executable
chmod +x /etc/rc.d/rc.local
# As root, run the same two commands on the command line so the change takes effect immediately (identical to the lines added to rc.local)
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
# Alternatively, add the following to rc.local instead:
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi
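After running the commands (and after any reboot, since rc.local re-applies them), confirm that THP is disabled; the brackets should now be around never:
# Expected output: always madvise [never]
cat /sys/kernel/mm/transparent_hugepage/enabled
cat /sys/kernel/mm/transparent_hugepage/defrag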
Download the installation packages
Download tidb-community-server-v8.5.3-linux-amd64.tar.gz and tidb-community-toolkit-v8.5.3-linux-amd64.tar.gz from the official website.
Download page: https://cn.pingcap.com/product-community/
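Optionally, verify that the downloads are intact by computing their SHA256 checksums and comparing them manually against the values published on the download page (a minimal sketch; the checksum values themselves are not reproduced here):
# Compute local checksums and compare against the download page
sha256sum tidb-community-server-v8.5.3-linux-amd64.tar.gz \
          tidb-community-toolkit-v8.5.3-linux-amd64.tar.gz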
Create the user and related directories
# Create the tidb user
useradd tidb
echo "tidb:TiDb@123" | chpasswd
# Hosts file
cat > /etc/hosts <<EOF
127.0.0.1 localhost
192.168.68.81 TiDb85Srv81
192.168.68.82 TiDb85Srv82
192.168.68.83 TiDb85Srv83
192.168.68.84 TiDb85Srv84
192.168.68.85 TiDb85Srv85
192.168.68.86 TiDb85Srv86
192.168.68.87 TiDb85Srv87
192.168.68.88 TiDb85Srv88
192.168.68.89 TiDb85Srv89
EOF
# Grant the tidb user sudo privileges
visudo
tidb ALL=(ALL) NOPASSWD: ALL
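If you prefer not to edit the main sudoers file interactively, an alternative (not part of the original steps, but standard sudo practice) is a drop-in file under /etc/sudoers.d, validated afterwards:
# Grant the tidb user passwordless sudo via a drop-in file
echo 'tidb ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/tidb
chmod 440 /etc/sudoers.d/tidb
# Validate the sudoers syntax
visudo -c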
# Set up passwordless SSH from the control machine to each target machine
su - tidb
ssh-keygen -t rsa
for i in {1..9};do ssh-copy-id -i ~/.ssh/id_rsa.pub tidb@TiDb85Srv8$i;done
#ssh-copy-id -p <port> <user>@<hostname or IP>
# Verify passwordless login
for i in {1..9};do ssh tidb@TiDb85Srv8$i -i ~/.ssh/id_rsa hostname;done
Create the directories and set their owner
# Create the `deploy_dir` and `data_dir` parent directories
mkdir -p /opt/tidb-deploy
mkdir -p /data/tidb-data
chown -R tidb.tidb /opt/tidb-deploy
chown -R tidb.tidb /data/tidb-data
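If you want to pre-create these directories on every target machine (TiUP can also create them during deploy when sudo is available), a loop like the following works, assuming the hostnames defined in /etc/hosts above and the passwordless SSH set up earlier (run as the tidb user):
for i in {1..9}; do
  ssh tidb@TiDb85Srv8$i "sudo mkdir -p /opt/tidb-deploy /data/tidb-data && sudo chown -R tidb:tidb /opt/tidb-deploy /data/tidb-data"
done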
Deploy the offline TiUP component
cd /mytmp
# Extract the packages
tar zxvf tidb-community-server-v8.5.3-linux-amd64.tar.gz
tar zxvf tidb-community-toolkit-v8.5.3-linux-amd64.tar.gz
# Install the offline TiUP component
chown -R tidb.tidb tidb-community*
su - tidb
cd /mytmp/tidb-community-server-v8.5.3-linux-amd64/
sh local_install.sh
# Then run the following, as prompted by the installer
source /home/tidb/.bash_profile
Verify the TiUP deployment
which tiup
tiup --version
tiup mirror show
Merge the offline packages
cp -rp keys ~/.tiup/
tiup mirror merge ../tidb-community-toolkit-v8.5.3-linux-amd64
tiup mirror show
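To confirm the merged mirror contains the expected components and version, you can list what the local offline mirror offers, for example:
# List available components in the offline mirror
tiup list
# List available versions of the tidb component; v8.5.3 should appear
tiup list tidb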
Initialize the topology file
Run the following command to generate the cluster initialization configuration file:
tiup cluster template > topology.yaml
The modified topology.yaml is as follows:
# # Global variables are applied to all deployments and used as the default value of
# # the deployments if a specific deployment value is missing.
global:
  user: "tidb"
  ssh_port: 22
  deploy_dir: "/opt/tidb-deploy"
  data_dir: "/data/tidb-data"

# # Monitored variables are applied to all the machines.
monitored:
  node_exporter_port: 9100
  blackbox_exporter_port: 9115
  deploy_dir: "/opt/tidb-deploy/monitored-9100"
  data_dir: "/data/tidb-data/monitored-9100"
  log_dir: "/opt/tidb-deploy/monitored-9100/log"

server_configs:
  tiproxy:
    ha.virtual-ip: "192.168.68.80/24"
    ha.interface: "ens33"
  tidb:
    log.slow-threshold: 300
  tikv:
    readpool.storage.use-unified-pool: false
    readpool.coprocessor.use-unified-pool: true
  pd:
    replication.enable-placement-rules: true
    replication.location-labels: ["zone","idc","host"]
  tiflash:
    logger.level: "info"

tiproxy_servers:
  - host: 192.168.68.81
    port: 6000
    status_port: 3080
    deploy_dir: "/opt/tidb-deploy/tidb-6000"

pd_servers:
  - host: 192.168.68.84
    name: "pd-1"
    client_port: 2379
    peer_port: 2380
    deploy_dir: "/opt/tidb-deploy/pd-2379"
    data_dir: "/data/tidb-data/pd-2379"
    log_dir: "/opt/tidb-deploy/pd-2379/log"
  - host: 192.168.68.85
    name: "pd-2"
    client_port: 2379
    peer_port: 2380
    deploy_dir: "/opt/tidb-deploy/pd-2379"
    data_dir: "/data/tidb-data/pd-2379"
    log_dir: "/opt/tidb-deploy/pd-2379/log"
  - host: 192.168.68.86
    name: "pd-3"
    client_port: 2379
    peer_port: 2380
    deploy_dir: "/opt/tidb-deploy/pd-2379"
    data_dir: "/data/tidb-data/pd-2379"
    log_dir: "/opt/tidb-deploy/pd-2379/log"

tidb_servers:
  - host: 192.168.68.82
    port: 4000
    status_port: 10080
    deploy_dir: "/opt/tidb-deploy/tidb-4000"
    log_dir: "/opt/tidb-deploy/tidb-4000/log"
  - host: 192.168.68.83
    port: 4000
    status_port: 10080
    deploy_dir: "/opt/tidb-deploy/tidb-4000"
    log_dir: "/opt/tidb-deploy/tidb-4000/log"

tikv_servers:
  - host: 192.168.68.87
    port: 20160
    status_port: 20180
    deploy_dir: "/opt/tidb-deploy/tikv-20160"
    data_dir: "/data1/tidb-data/tikv-20160"
    log_dir: "/opt/tidb-deploy/tikv-20160/log"
    config:
      server.labels: { zone: "zone1", idc: "idc1", host: "host1" }
  - host: 192.168.68.88
    port: 20160
    status_port: 20180
    deploy_dir: "/opt/tidb-deploy/tikv-20160"
    data_dir: "/data2/tidb-data/tikv-20160"
    log_dir: "/opt/tidb-deploy/tikv-20160/log"
    config:
      server.labels: { zone: "zone1", idc: "idc1", host: "host2" }
  - host: 192.168.68.89
    port: 20160
    status_port: 20180
    deploy_dir: "/opt/tidb-deploy/tikv-20160"
    data_dir: "/data3/tidb-data/tikv-20160"
    log_dir: "/opt/tidb-deploy/tikv-20160/log"
    config:
      server.labels: { zone: "zone1", idc: "idc1", host: "host3" }

tiflash_servers:
  - host: 192.168.68.81
    tcp_port: 9000
    flash_service_port: 3930
    flash_proxy_port: 20170
    flash_proxy_status_port: 20292
    metrics_port: 8234
    deploy_dir: "/opt/tidb-deploy/tiflash-9000"
    data_dir: "/data/tidb-data/tiflash-9000"
    log_dir: "/opt/tidb-deploy/tiflash-9000/log"

monitoring_servers:
  - host: 192.168.68.81
    port: 9090
    deploy_dir: "/opt/tidb-deploy/prometheus-8249"
    data_dir: "/data/tidb-data/prometheus-8249"
    log_dir: "/opt/tidb-deploy/prometheus-8249/log"

grafana_servers:
  - host: 192.168.68.81
    port: 3000
    deploy_dir: /opt/tidb-deploy/grafana-3000

alertmanager_servers:
  - host: 192.168.68.81
    web_port: 9093
    cluster_port: 9094
    deploy_dir: "/opt/tidb-deploy/alertmanager-9093"
    data_dir: "/data/tidb-data/alertmanager-9093"
    log_dir: "/opt/tidb-deploy/alertmanager-9093/log"
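Note that ha.interface above is set to ens33, which is specific to this lab's network card naming, and the virtual IP 192.168.68.80 is assumed to be unused. Before deploying, it is worth confirming both on the TiProxy host, for example:
# Show interface names and addresses on the TiProxy host
ip -br addr show
# The VIP should not answer before deployment
ping -c 2 192.168.68.80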
Check the environment
# Run the following commands to check the environment
su - tidb
tiup cluster check topology.yaml --user root -p
# If many items report Fail, run the auto-repair command
tiup cluster check topology.yaml --apply --user root -p
# Check again after the repair completes
tiup cluster check topology.yaml --user root -p
# Warn items can be ignored; once there are no Fail items you can proceed to the next step
Run the deployment
su - tidb
cd /mytmp/tidb-community-server-v8.5.3-linux-amd64
tiup cluster deploy tidb853 v8.5.3 topology.yaml --user tidb -p
TiUP prints a success message when the deployment completes.
List the clusters managed by TiUP
tiup cluster list
TiUP can manage multiple TiDB clusters. This command lists every cluster currently managed by TiUP cluster, including the cluster name, deployment user, version, and key information.
Check the deployed TiDB cluster
tiup cluster display tidb853
The expected output includes each instance's ID, role, host, listening ports, status (Down/inactive, because the cluster has not been started yet), and directories for the tidb853 cluster.
Initialize the cluster with secure start
With secure start, TiUP automatically generates a password for the TiDB root user and returns it on the command line.
After a secure start, you can no longer log in to the database as root without a password, so record the password returned on the command line for later operations.
tiup cluster start tidb853 --init
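Because the generated root password is only printed once, it can be convenient to keep a copy of the start output. One optional way (the log file path here is just an illustration) is:
# Keep a copy of the output that contains the generated root password
tiup cluster start tidb853 --init | tee /home/tidb/tidb853-init.log
chmod 600 /home/tidb/tidb853-init.log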
Verify the cluster status
tiup cluster display tidb853
Expected output: every node's Status is Up, which means the cluster is healthy.
Connect to TiDB (both nodes)
mysql -h192.168.68.82 -P4000 -uroot -pZ9V5^A@-3_678fRXyY -e "show databases"
mysql -h192.168.68.83 -P4000 -uroot -pZ9V5^A@-3_678fRXyY -e "show databases"
# The password is the random one generated during cluster initialization (you can change it after logging in)
# Or connect through TiProxy
mysql -h192.168.68.81 -P6000 -uroot -pZ9V5^A@-3_678fRXyY -e "show databases"
# Install the MySQL client first if it is missing: yum install mysql
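To see that TiProxy actually balances connections across the two TiDB servers, you can open several connections through port 6000 and check which backend each one lands on. This sketch assumes the MySQL-compatible @@hostname variable that TiDB exposes:
# Each new connection may be routed to a different TiDB instance
for i in {1..4}; do
  mysql -h192.168.68.81 -P6000 -uroot -p'Z9V5^A@-3_678fRXyY' -N -e "select @@hostname"
done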
Change the root user password
-- Connect through TiProxy
mysql -h192.168.68.81 -P6000 -uroot -pZ9V5^A@-3_678fRXyY
mysql> select user, host from mysql.user;
mysql> alter user root@'%' identified by 'Root@123';
mysql> flush privileges;
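After changing the password, it is worth confirming that the new credentials work, for example:
# Reconnect through TiProxy with the new password
mysql -h192.168.68.81 -P6000 -uroot -p'Root@123' -e "select user(), version()"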
Access TiDB Dashboard (PD node)
Log in at http://192.168.68.84:2379/dashboard/ with the root account and password.