进学阁

业精于勤荒于嬉,行成于思毁于随

0%

分布式架构下

12.通过Zookeeper配置Mycat高可用

12.1 ZooKeeper是什么?

ZooKeeper是一个集中的服务,用于配置管理、命名服务、提供分布式同步和提供组服务等。本文将通过ZK配置集中管理去管理Mycat配置(schema.xml、server.xml、rule.xml …),即:将mycat的配置全部放到ZK上进行监听,一旦配置信息改变,就从ZK获取最新的配置信息应用到mycat服务中

12.2 通过ZooKeeper集群管理Mycat配置

12.2.0 清理测试环境

1
2
3
[root@db01 ~]# pkill java
[root@db01 ~]# rm -rf /data/app/mycat/
[root@db01 ~]# rm -rf /data/app/zookeeper/

12.2.1 安装配置ZK

节点 部署的软件 数据库实例
db01 jdk1.8、mycat1.6.7.4、zookeeper3.5.8、MySQL 8.0.20 3307,3308,3309, 3310
db02 jdk1.8、mycat1.6.7.4、zookeeper3.5.8、MySQL 8.0.20 3307,3308,3309, 3310
db03 jdk1.8、mycat1.6.7.4、zookeeper3.5.8、MySQL 8.0.20 3306
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
1.配置JDK环境
linux:
export JAVA_HOME=/usr/local/jdk1.8
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
windows:
略。
#2.安装zk
cd /data/app/
wget http://mirror.bit.edu.cn/apache/zookeeper/zookeeper-3.5.8/apache-zookeeper-3.5.8.tar.gz
cd /data/app
tar xf apache-zookeeper-3.5.8.tar.gz
ln -s apache-zookeeper-3.5.8 zookeeper
3. 安装maven
# 安装
wget http://mirrors.hust.edu.cn/apache/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz
# 解压
tar -zxvf apache-maven-3.6.3-bin.tar.gz
# 添加环境变量
vim /etc/profile
export MAVEN_HOME=/usr/local/apache-maven-3.6.3
export PATH=$MAVEN_HOME/bin:$PATH
# 重新加载环境变量
source /etc/profile
# 查看是否安装成功
mvn -v
# mvn编译zookeeper
cd /data/app/zookeeper
cd zookeeper-server
mvn package -Dmaven.test.skip=true
4. 拷贝ZK到各个节点
scp -r zookeeper/ 10.0.0.53:/data/app/
scp -r zookeeper/ 10.0.0.52:/data/app/
5. 配置启动zk
cd zookeeper
rm -rf data log
mkdir data log
cd conf/
\cp zoo_sample.cfg zoo.cfg
# 修改三个节点以下配置:
vim zoo.cfg
dataDir=/data/app/zookeeper/data
dataLogDir=/data/app/zookeeper/log
clientPort=2181
server.1=10.0.0.51:2888:3888
server.2=10.0.0.52:2888:3888
server.3=10.0.0.53:2888:3888
#拷贝到各个节点
scp zoo.cfg 10.0.0.52:/data/app/zookeeper/conf/
zoo.cfg
scp zoo.cfg 10.0.0.53:/data/app/zookeeper/conf/
zoo.cfg
# 添加myid文件
vim /data/app/zookeeper/data/myid --->1、2、3三个节点需要不同id
# 三个节点分表启动zk
/data/app/zookeeper/bin/zkServer.sh start

#查看各个节点zk状态
[root@db02 bin]# /data/app/zookeeper/bin/zkServer.sh status
/usr/bin/java
ZooKeeper JMX enabled by default
Using config: /data/app/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: leader

[root@db01 app]# /data/app/zookeeper/bin/zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /data/app/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@db01 app]# /data/app/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /data/app/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: follower

[root@db03 bin]# /data/app/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /data/app/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: follower

12.3 配置Mycat基础环境

拓扑图

12.3.1 安装3台Mycat节点

12.3.2 配置成取模分片+本地SEQ

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# schema.xml
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
<schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100"
dataNode="sh1">
<table name="oldguo" dataNode="sh1,sh2" rule="mod-long" />
</schema>
<dataNode name="sh1" dataHost="oldguo1" database= "taobao" />
<dataNode name="sh2" dataHost="oldguo2" database= "taobao" />
<dataHost name="oldguo1" maxCon="1000" minCon="10" balance="1"
writeType="0" dbType="mysql" dbDriver="native" switchType="1">
<heartbeat>select user()</heartbeat>
<writeHost host="db1" url="10.0.0.51:3307" user="root" password="123">
<readHost host="db2" url="10.0.0.51:3309" user="root"
password="123" />
</writeHost>
<writeHost host="db3" url="10.0.0.52:3307" user="root" password="123">
<readHost host="db4" url="10.0.0.52:3309" user="root"
password="123" />
</writeHost>
</dataHost>
<dataHost name="oldguo2" maxCon="1000" minCon="10" balance="1"
writeType="0" dbType="mysql" dbDriver="native" switchType="1">
<heartbeat>select user()</heartbeat>
<writeHost host="db1" url="10.0.0.51:3308" user="root" password="123">
<readHost host="db2" url="10.0.0.51:3310" user="root"
password="123" />
</writeHost>
<writeHost host="db3" url="10.0.0.52:3308" user="root" password="123">
<readHost host="db4" url="10.0.0.52:3310" user="root"
password="123" />
</writeHost>
</dataHost>
</mycat:schema>
# rule.xml
<tableRule name="mod-long">
<rule>
<columns>sharding_id</columns>
<algorithm>mod-long</algorithm>
</rule>
</tableRule>
<function name="mod-long"
class="io.mycat.route.function.PartitionByMod">
<!-- how many data nodes -->
<property name="count">2</property>
</function>
# server.xml
<property name="sequnceHandlerType">0</property>
create table oldguo(
id bigint(20) not null primary key auto_increment,
name varchar(20),
sharding_id bigint(20),
company_id bigint(20)
);

<!--最后: 同步所有配置到各个节点mycat。-->

12.3.3 启动mycat测试

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
# 3.数据录入
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy001', 10011, 1);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy002', 10012, 2);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy003', 10013, 3);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy004', 10014, 4);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy005', 10015, 5);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy006', 10016, 1);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy007', 10017, 1);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy008', 10018, 2);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy009', 10019, 2);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy010', 10020, 1);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy011', 10021, 3);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy012', 10022, 4);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy013', 10023, 5);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy014', 10024, 5);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy015', 10025, 2);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy016', 10026, 2);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy017', 10027, 3);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy018', 10028, 3);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy019', 10029, 1);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy021', 10030, 1);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy022', 10031, 2);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy023', 10032, 5);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy024', 10033, 4);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy025', 10034, 5);
insert into oldguo(id, name, sharding_id, company_id) values(next value for
MYCATSEQ_GLOBAL, 'oldboy026', 10035, 3);

12.4 配置Mycat支持ZK管理

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
#修改mycat配置文件,支持zk管理
[root@db01 conf]# vim myid.properties

loadZk=true
zkURL=10.0.0.51:2181,10.0.0.52:2181,10.0.0.53:2181
clusterId=mycat-cluster-1
myid=mycat_fz_01
clusterSize=3
clusterNodes=mycat_fz_01,mycat_fz_02,mycat_fz_03
#server booster ; booster install on db same server, will reset all minCon to 2
type=server
boosterDataHosts=oldguo1,oldguo2

# 拷贝mycat初始配置,将配置好的conf目录下的配置文件复制到同级目录zkconf下
\cp /data/app/mycat/conf/*.xml /data/app/mycat/conf/zkconf/
\cp /data/app/mycat/conf/*.properties /data/app/mycat/conf/zkconf/
\cp /data/app/mycat/conf/*.txt /data/app/mycat/conf/zkconf/
\cp /data/app/mycat/conf/*.conf /data/app/mycat/conf/zkconf/

# 初始化ZK数据
cd /data/app/mycat/bin
./init_zk_data.sh

解决报错:
<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="INFO" shutdownHook="disable">
...
</Configuration>

12.5 使用ZooInspector工具管理ZK

  1. 下载地址:
    https://issues.apache.org/jira/secure/attachment/12436620/ZooInspector.zip
  2. 解压,进入build目录,运行 java -jar zookeeper-dev-ZooInspector.jar

  1. https://www.bejson.com/ 在线美化代码工具。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
[{
"balance": 1,
"maxCon": 1000,
"minCon": 10,
"name": "oldguo1",
"writeType": 0,
"switchType": 1,
"dbType": "mysql",
"dbDriver": "native",
"heartbeat": "select user()",
"writeHost": [{
"host": "db1",
"url": "10.0.0.51:3307",
"password": "123",
"user": "root",
"readHost": [{
"host": "db2",
"url": "10.0.0.51:3309",
"password": "123",
"user": "root"
}]
}, {
"host": "db3",
"url": "10.0.0.52:3307",
"password": "123",
"user": "root",
"readHost": [{
"host": "db4",
"url": "10.0.0.52:3309",
"password": "123",
"user": "root"
}]
}]
}, {
"balance": 1,
"maxCon": 1000,
"minCon": 10,
"name": "oldguo2",
"writeType": 0,
"switchType": 1,
"dbType": "mysql",
"dbDriver": "native",
"heartbeat": "select user()",
"writeHost": [{
"host": "db1",
"url": "10.0.0.51:3308",
"password": "123",
"user": "root",
"readHost": [{
"host": "db2",
"url": "10.0.0.51:3310",
"password": "123",
"user": "root"
}]
}, {
"host": "db3",
"url": "10.0.0.52:3308",
"password": "123",
"user": "root",
"readHost": [{
"host": "db4",
"url": "10.0.0.52:3310",
"password": "123",
"user": "root"
}]
}]
}]

12.6 mycat-web应用

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# 注意:mycat-web默认不支持MySQL8.0,需要做以下操作
cd /data/app/mycat/lib/
上传8.0的连接驱动
chmod 777 mysql-connector-java-8.0.21.jar

#上传解压软件
tar -xvf Mycat-web-1.0-SNAPSHOT-20160331220346-linux.tar.gz -C /usr/local/

# 修改zookeeper配置信息:
cd /usr/local/mycat-web/mycat-web/WEB-INF/classes
vim mycat.properties
zookeeper=127.0.0.1:2181
sqlonline.server=localhost
sqlonline.user=dev
sqlonline.passwd=123456

# 创建用户
create user dev@'localhost' identified with mysql_native_password by
'123456';
grant all on *.* to dev@'localhost' ;

# 导入模板数据
cd /usr/local/mycat-web/mycat-web/WEB-INF/db
create database mycat_eye charset utf8;
use mycat_eye;
source mycat-web.sql

# 启动:
cd /usr/local/mycat-web/
nohup ./start.sh &

13.Mycat的节点扩容

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
1. 确认数据分布
2. 修改schema.xml
cp schema.xml newSchema.xml
vim newSchema.xml
修改dataNode="sh1,sh2,sh3"(新的分片定义)

3. 修改rule.xml
cp rule.xml newRule.xml
vim newRule.xml
修改节点个数

4. 修改migrateTables.properties文件。
写明要迁移的schema和表,多张表用逗号隔开
TESTDB=oldguo

5.停止mycat和zk

6. 执行dataMigrate.sh开始迁移(扩容)。

13.Mycat+Mycat-web+PXC+ZK+Sequence分布式集群实现

13.1 节点规划

节点 软件
10.0.0.11-pxc1- node01 pxb2.4.14、pxc-5.7.25
10.0.0.12-pxc1- node02 pxb2.4.14、pxc-5.7.25
10.0.0.13-pxc1- node03 pxb2.4.14、pxc-5.7.25
10.0.0.14-pxc2- node04 pxb2.4.14、pxc-5.7.25、mycat1.6.7.4、zookeeper3.5.8
10.0.0.15-pxc2- node05 pxb2.4.14、pxc-5.7.25、mycat1.6.7.4、zookeeper3.5.8
10.0.0.16-pxc2- node06 pxb2.4.14、pxc-5.7.25、mycat1.6.7.4、zookeeper3.5.8、haproxy、mycatweb

13.2 环境清理

1
2
3
4
5
6
[root@db01 bin]# pkill mysqld
[root@db01 bin]# pkill java
[root@db01 bin]# \rm -rf /data/33*
[root@db01 bin]# rm -rf /etc/my.*
[root@node01 ~]# rm -rf /etc/percona*
[root@node01 ~]# yum remove percona* -y

13.3 2组PXC集群实施

13.3.1 软件上传及解压(6个节点都做)

1
2
3
4
5
6
7
8
# 创建目录,分别上传pxc和pxb的软件
mkdir -p /usr/local/pxc
mkdir -p /usr/local/pxb
mv Percona-XtraBackup-2.4.14-ref675d4-el7-x86_64-bundle.tar pxb/
mv Percona-XtraDB-Cluster-5.7.25-31.35-r463-el7-x86_64-bundle.tar pxc/
tar xf Percona-XtraBackup-2.4.14-ref675d4-el7-x86_64-bundle.tar
tar xf Percona-XtraDB-Cluster-5.7.25-31.35-r463-el7-x86_64-bundle.tar

13.3.2 软件安装(6个节点都做)

1
2
3
4
5
6
7
8
9
10
# 安装pxb软件
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
cd /usr/local/pxb
yum install -y percona-xtrabackup-24-2.4.14-1.el7.x86_64.rpm

#安装pxc软件
cd /usr/local/pxc/
rpm -qa | grep Percona-Server | grep -v compat |xargs rpm -e --nodeps
yum install -y *.rpm

13.3.3 配置PXC集群

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
#启动数据库
systemctl start mysql

# 修改密码和创建用户
grep "temporary password" /var/log/mysqld.log
>Pgg0k>B_Gr7
ALTER USER 'root'@'localhost' IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '123' WITH GRANT
OPTION;
flush privileges;

## 创建pxc的sst账户用户同步
CREATE USER 'sstuser'@'localhost' IDENTIFIED BY '123';
GRANT RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO
'sstuser'@'localhost';
FLUSH PRIVILEGES;

[root@node01 percona-xtradb-cluster.conf.d]# cat wsrep.cnf
[mysqld]
# Path to Galera library
wsrep_provider=/usr/lib64/galera3/libgalera_smm.so

# Cluster connection URL contains IPs of nodes
#If no IP is found, this implies that a new cluster needs to be created,
#in order to do that you need to bootstrap this node
wsrep_cluster_address=gcomm://10.0.0.11,10.0.0.12,10.0.0.13

# In order for Galera to work correctly binlog format should be ROW
binlog_format=ROW

# MyISAM storage engine has only experimental support
default_storage_engine=InnoDB

# Slave thread to use
wsrep_slave_threads= 8
wsrep_log_conflicts

# This changes how InnoDB autoincrement locks are managed and is a requirement for Galera
innodb_autoinc_lock_mode=2

# Node IP address
wsrep_node_address=10.0.0.11

# Cluster name
wsrep_cluster_name=pxc-cluster

#If wsrep_node_name is not specified, then system hostname will be used
wsrep_node_name=node01

#pxc_strict_mode allowed values: DISABLED,PERMISSIVE,ENFORCING,MASTER
pxc_strict_mode=ENFORCING

# SST method
#wsrep_sst_method=xtrabackup-v2

#Authentication for SST method
wsrep_sst_auth="sstuser:123"
wsrep_sst_method = rsync

# 在第一节点引导启动集群
service mysql@bootstrap.service start
show global status like 'wsrep%';
# 其他节点
service mysql start

13.4 部署Mycat集群

13.4.1 安装jdk

1
2
3
4
5
6
7
8
# 上传软件至/usr/local 解压


# 设置环境变量
export JAVA_HOME=/usr/local/jdk1.8
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=/usr/local/mycat/bin:$PATH

13.4.2 上传至/usr/local解压Mycat

13.5 上传zookeeper进行配置启动

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
# 上传软件至/usr/local/并解压
tar xf apache-zookeeper-3.5.8-mvn.tar
mv apache-zookeeper-3.5.8 zk

# 创建相关目录并修改配置文件
cd zk
rm -rf data log
mkdir data log
cd conf/
\cp zoo_sample.cfg zoo.cfg

# 修改三个节点以下配置:
vim zoo.cfg
dataDir=/data/app/zookeeper/data
dataLogDir=/data/app/zookeeper/log
clientPort=2181
server.1=10.0.0.51:2888:3888
server.2=10.0.0.52:2888:3888
server.3=10.0.0.53:2888:3888

# 拷贝到各个节点
scp zoo.cfg 10.0.0.52:/usr/local/zk/conf/
scp zoo.cfg 10.0.0.53:/usr/local/zk/conf/

# 添加myid文件
vim /usr/local/zk/data/myid --->1、2、3三个节点需要不同id

# 三个节点分表启动zk
/usr/local/zk/bin/zkServer.sh start

# 查看各个节点zk状态
[root@db02 bin]# /usr/local/zk/bin/zkServer.sh status
/usr/bin/java
ZooKeeper JMX enabled by default
Using config: /data/app/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: leader

13.6 将PXC集群加入Mycat管理

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
# vim schema.xml
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
<schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100"
dataNode="sh1">
<table name="oldguo" dataNode="sh1,sh2" rule="mod-long" />
</schema>
<dataNode name="sh1" dataHost="oldguo1" database= "taobao" />
<dataNode name="sh2" dataHost="oldguo2" database= "taobao" />
<dataHost name="oldguo1" maxCon="1000" minCon="10" balance="1"
writeType="0" dbType="mysql" dbDriver="native" switchType="3">
<heartbeat>show status like 'wsrep%'</heartbeat>
<writeHost host="db1" url="10.0.0.11:3306" user="root" password="123"
/>
<writeHost host="db2" url="10.0.0.12:3306" user="root" password="123"
/>
<writeHost host="db3" url="10.0.0.13:3306" user="root" password="123"
/>
</dataHost>
<dataHost name="oldguo2" maxCon="1000" minCon="10" balance="1"
writeType="0" dbType="mysql" dbDriver="native" switchType="3">
<heartbeat>show status like 'wsrep%'</heartbeat>
<writeHost host="db1" url="10.0.0.14:3306" user="root" password="123"
/>
<writeHost host="db2" url="10.0.0.15:3306" user="root" password="123"
/>
<writeHost host="db3" url="10.0.0.16:3306" user="root" password="123"
/>
</dataHost>
</mycat:schema>
# rule.xml
<tableRule name="mod-long">
<rule>
<columns>sharding_id</columns>
<algorithm>mod-long</algorithm>
</rule>
</tableRule>
<function name="mod-long"
class="io.mycat.route.function.PartitionByMod">
<!-- how many data nodes -->
<property name="count">2</property>
</function>

13.7 Mycat和zookeeper结合

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
# 修改mycat配置文件,支持zk管理
[root@db01 conf]# vim myid.properties
loadZk=true
zkURL=10.0.0.14:2181,10.0.0.15:2181,10.0.0.16:2181
clusterId=mycat-cluster-1
myid=mycat_fz_01
clusterSize=3
clusterNodes=mycat_fz_01,mycat_fz_02,mycat_fz_03
#server booster ; booster install on db same server, will reset all minCon to 2
type=server
boosterDataHosts=oldguo1,oldguo2

# 拷贝mycat初始配置,将配置好的conf目录下的配置文件复制到同级目录zkconf下
\cp /usr/local/mycat/conf/*.xml /usr/local/mycat/conf/zkconf/
\cp /usr/local/mycat/conf/*.properties /usr/local/mycat/conf/zkconf/
\cp /usr/local/mycat/conf/*.txt /usr/local/mycat/conf/zkconf/
\cp /usr/local/mycat/conf/*.conf /usr/local/mycat/conf/zkconf/

# 初始化ZK数据
cd /usr/local/mycat/bin
./init_zk_data.sh
解决报错:
<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="INFO" shutdownHook="disable">
...
</Configuration>

使用ZooInspector工具管理ZK

haproxy

keepalived