Implementing read/write splitting and database/table sharding with sharding-jdbc
Requirements:
Database sharding: orders from different business lines are stored in different databases, sharded by business_id.
Table sharding: orders from different users are stored in different tables, sharded by user_id; to allow querying directly by the non-sharding column order_id, the gene method can be used.
Read/write splitting: to relieve pressure on the master databases, read operations go to the slave databases.
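As a concrete illustration of these routing rules, here is a tiny sketch with invented values (business_id = 103, user_id = 6); the data source and table names match the configuration shown later in this article:

// Hypothetical routing illustration; the values are invented for the example.
public class RoutingExample {
    public static void main(String[] args) {
        long businessId = 103;                         // database sharding key
        long userId = 6;                               // table sharding key
        String dataSourceGroup = "rbb_" + businessId;  // -> rbb_103: writes go to database_103, reads to database_slave_103
        String table = "t_order_" + (userId % 4);      // -> t_order_2
        System.out.println(dataSourceGroup + " / " + table);
    }
}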
Database and table SQL
-- Master databases
CREATE DATABASE `database_103`;
CREATE DATABASE `database_112`;
-- Slave databases
CREATE DATABASE `database_slave_103`;
CREATE DATABASE `database_slave_112`;
-- Create the following tables (t_order_0 .. t_order_3) in each of the databases above
CREATE TABLE `t_order_0` (
`id` bigint(20) NOT NULL,
`order_id` bigint(20) NOT NULL,
`user_id` bigint(20) NOT NULL,
`business_id` bigint(20) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `t_order_1` (
`id` bigint(20) NOT NULL,
`order_id` bigint(20) NOT NULL,
`user_id` bigint(20) NOT NULL,
`business_id` bigint(20) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `t_order_2` (
`id` bigint(20) NOT NULL,
`order_id` bigint(20) NOT NULL,
`user_id` bigint(20) NOT NULL,
`business_id` bigint(20) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `t_order_3` (
`id` bigint(20) NOT NULL,
`order_id` bigint(20) NOT NULL,
`user_id` bigint(20) NOT NULL,
`business_id` bigint(20) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
pom.xml
<!-- sharding-jdbc -->
<dependency>
<groupId>com.dangdang</groupId>
<artifactId>sharding-jdbc-core</artifactId>
<version>1.4.2</version>
</dependency>
<dependency>
<groupId>com.dangdang</groupId>
<artifactId>sharding-jdbc-config-spring</artifactId>
<version>1.4.0</version>
</dependency>
spring-database.xml
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:tx="http://www.springframework.org/schema/tx"
       xmlns:rdb="http://www.dangdang.com/schema/ddframe/rdb"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
                           http://www.springframework.org/schema/beans/spring-beans.xsd
                           http://www.springframework.org/schema/tx
                           http://www.springframework.org/schema/tx/spring-tx.xsd
                           http://www.dangdang.com/schema/ddframe/rdb
                           http://www.dangdang.com/schema/ddframe/rdb/rdb.xsd">
<bean id="database_112" class="com.alibaba.druid.pool.DruidDataSource" init-method="init"
destroy-method="close">
<property name="driverClassName" value="com.mysql.jdbc.Driver"/>
<property name="url" value="${jdbc.url.112}"></property>
<property name="username" value="${ername.112}"></property>
<property name="password" value="${jdbc.password.112}"></property>
<property name="maxActive" value="100"/>
<property name="initialSize" value="50"/>
<property name="maxWait" value="60000"/>
<property name="minIdle" value="5"/>
</bean>
<bean id="database_slave_112" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
<property name="driverClassName" value="com.mysql.jdbc.Driver"/>
<property name="url" value="${jdbc.url.slave.112}"></property>
<property name="username" value="${ername.slave.112}"></property>
<property name="password" value="${jdbc.password.slave.112}"></property>
<property name="maxActive" value="100"/>
<property name="initialSize" value="50"/>
<property name="maxWait" value="60000"/>
<property name="minIdle" value="5"/>
</bean>
<bean id="database_103" class="com.alibaba.druid.pool.DruidDataSource" init-method="init"
destroy-method="close">
<property name="driverClassName" value="com.mysql.jdbc.Driver"/>
<property name="url" value="${jdbc.url.103}"></property>
<property name="username" value="${ername.103}"></property>
<property name="password" value="${jdbc.password.103}"></property>
<property name="maxActive" value="100"/>
<property name="initialSize" value="50"/>
<property name="maxWait" value="60000"/>
<property name="minIdle" value="5"/>
</bean>
<bean id="database_slave_103" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
<property name="driverClassName" value="com.mysql.jdbc.Driver"/>
<property name="url" value="${jdbc.url.slave.103}"></property>
<property name="username" value="${ername.slave.103}"></property>
<property name="password" value="${jdbc.password.slave.103}"></property>
<property name="maxActive" value="100"/>
<property name="initialSize" value="50"/>
<property name="maxWait" value="60000"/>
<property name="minIdle" value="5"/>
</bean>
<!--mybatis-->
<bean id="sqlSessionFactory" class="org.mybatis.spring.SqlSessionFactoryBean">
<property name="mapperLocations" value="classpath:xmlmapper/*.xml"/>
<property name="dataSource" ref="shardingDataSource"/>
<!-- MyBatis configuration file -->
<property name="configLocation" value="classpath:/mybatis/mybatis-config.xml"/>
</bean>
<bean id="sqlSession" class="org.mybatis.spring.SqlSessionTemplate">
<constructor-arg index="0" ref="sqlSessionFactory"/>
</bean>
<!-- Scanner for mapper interfaces -->
<bean class="org.mybatis.spring.mapper.MapperScannerConfigurer">
<property name="basePackage" value="com.lc.sharding.mybatismapper"/>
<property name="sqlSessionTemplateBeanName" value="sqlSession"/>
</bean>
<!-- Transaction management -->
<bean id="txManager"
class="org.springframework.jdbc.datasource.DataSourceTransactionManager">
<property name="dataSource" ref="shardingDataSource"/>
</bean>
<tx:annotation-driven transaction-manager="txManager"/>
<!-- Read/write splitting: one master-slave group per database -->
<rdb:master-slave-data-source id="rbb_112" master-data-source-ref="database_112"
slave-data-sources-ref="database_slave_112"/>
<rdb:master-slave-data-source id="rbb_103" master-data-source-ref="database_103"
slave-data-sources-ref="database_slave_103"/>
<!-- Database sharding strategy (by business_id) -->
<rdb:strategy id="databaseShardingStrategy" sharding-columns="business_id"
algorithm-expression="rbb_${business_id.longValue()}"/>
<!-- Table sharding strategy (by user_id and order_id) -->
<rdb:strategy id="tableShardingStrategy" sharding-columns="user_id,order_id"
algorithm-class="com.lc.sharding.algorithm.MultipleKeysTableShardingAlgorithmImpl"/>
<rdb:data-source id="shardingDataSource">
<rdb:sharding-rule data-sources="rbb_112,rbb_103">
<rdb:table-rules>
<rdb:table-rule logic-table="t_order" actual-tables="t_order_${0..3}" database-strategy="databaseShardingStrategy" table-strategy="tableShardingStrategy"/>
</rdb:table-rules>
</rdb:sharding-rule>
<rdb:props>
<prop key="metrics.enable">true</prop>
<prop key="sql.show">true</prop>
</rdb:props>
</rdb:data-source>
</beans>
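The data source beans above reference placeholders such as ${jdbc.url.112}. This assumes a property placeholder (for example a <context:property-placeholder> declared elsewhere) loading a properties file; a sketch of such a file, with hypothetical hosts and credentials, might look like this:

# Hypothetical jdbc.properties; adjust hosts, ports, and credentials for your environment.
jdbc.url.112=jdbc:mysql://127.0.0.1:3306/database_112?useUnicode=true&characterEncoding=utf8
jdbc.username.112=root
jdbc.password.112=secret
jdbc.url.slave.112=jdbc:mysql://127.0.0.1:3307/database_slave_112?useUnicode=true&characterEncoding=utf8
jdbc.username.slave.112=root
jdbc.password.slave.112=secret
jdbc.url.103=jdbc:mysql://127.0.0.1:3306/database_103?useUnicode=true&characterEncoding=utf8
jdbc.username.103=root
jdbc.password.103=secret
jdbc.url.slave.103=jdbc:mysql://127.0.0.1:3307/database_slave_103?useUnicode=true&characterEncoding=utf8
jdbc.username.slave.103=root
jdbc.password.slave.103=secret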
Gene-based multi-key table sharding
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

// Import paths below are those used by sharding-jdbc 1.4.x; verify them against the version in use.
import com.dangdang.ddframe.rdb.sharding.api.ShardingValue;
import com.dangdang.ddframe.rdb.sharding.api.strategy.table.MultipleKeysTableShardingAlgorithm;

public class MultipleKeysTableShardingAlgorithmImpl implements MultipleKeysTableShardingAlgorithm {

    @Override
    public Collection<String> doSharding(Collection<String> tableNames, Collection<ShardingValue<?>> shardingValues) {
        List<String> shardingSuffix = new ArrayList<String>();
        long partId = 0;
        // Either sharding column determines the partition: thanks to the gene embedded in
        // order_id, order_id % 4 always equals user_id % 4 for the same user.
        for (ShardingValue<?> value : shardingValues) {
            if (value.getColumnName().equals("user_id")) {
                partId = ((Long) value.getValue()) % 4;
                break;
            } else if (value.getColumnName().equals("order_id")) {
                partId = ((Long) value.getValue()) % 4;
                break;
            }
        }
        // Return the actual table whose suffix matches the computed partition id.
        for (String name : tableNames) {
            if (name.endsWith(String.valueOf(partId))) {
                shardingSuffix.add(name);
                return shardingSuffix;
            }
        }
        return shardingSuffix;
    }
}
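With the data sources and the sharding algorithm in place, DAO code only ever references the logical table t_order; sharding-jdbc rewrites and routes each statement. The original project uses XML mappers under xmlmapper/, which are not shown; the annotation-based mapper interface below is a minimal, hypothetical sketch placed in the scanned package.

package com.lc.sharding.mybatismapper;

import java.util.List;

import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;

// Hypothetical mapper: every statement targets the logical table t_order and is
// routed by sharding-jdbc to rbb_103/rbb_112 and t_order_0..t_order_3.
public interface OrderMapper {

    @Insert("INSERT INTO t_order (id, order_id, user_id, business_id) "
            + "VALUES (#{id}, #{orderId}, #{userId}, #{businessId})")
    int insertOrder(@Param("id") long id, @Param("orderId") long orderId,
                    @Param("userId") long userId, @Param("businessId") long businessId);

    // Queries by order_id alone still resolve to a single table thanks to the gene.
    @Select("SELECT user_id FROM t_order WHERE order_id = #{orderId}")
    List<Long> selectUserIdByOrderId(@Param("orderId") long orderId);
}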
What is gene-based sharding?
When the order id is generated, a sharding gene is appended to the end of order_id, so that all orders belonging to the same user_id carry the same gene and therefore land in the same table.
Generating order_id from user_id:
public long buildOrderId(long userId) {
    // Take the last 4 bits of the user id as the sharding gene.
    long gene = userId & 15;
    // Generate a 60-bit distributed unique id first.
    long uniqueId = this.nextId();
    // Shift the unique id left by 4 bits and append the gene.
    return (uniqueId << 4) | gene;
}
this.nextId() uses the Snowflake algorithm to generate a 60-bit distributed unique id: 1 sign bit + a 41-bit timestamp + a 5-bit workId + a 5-bit datacenterId + a 6-bit sequence; together with the 4-bit gene this forms the final order_id.
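nextId() itself is not shown in the original article. Below is a minimal sketch of a Snowflake-style generator using the field widths listed above; the class name, the custom epoch, and the omission of clock-rollback handling are assumptions for illustration only.

// Hypothetical Snowflake-style generator; epoch and class name are assumptions.
public class OrderIdGenerator {

    private static final long EPOCH = 1420041600000L;  // assumed custom epoch: 2015-01-01
    private static final long WORKER_ID_BITS = 5L;
    private static final long DATACENTER_ID_BITS = 5L;
    private static final long SEQUENCE_BITS = 6L;

    private static final long WORKER_ID_SHIFT = SEQUENCE_BITS;
    private static final long DATACENTER_ID_SHIFT = SEQUENCE_BITS + WORKER_ID_BITS;
    private static final long TIMESTAMP_SHIFT = SEQUENCE_BITS + WORKER_ID_BITS + DATACENTER_ID_BITS;
    private static final long SEQUENCE_MASK = (1L << SEQUENCE_BITS) - 1;

    private final long workerId;       // 0..31
    private final long datacenterId;   // 0..31
    private long sequence = 0L;
    private long lastTimestamp = -1L;

    public OrderIdGenerator(long workerId, long datacenterId) {
        this.workerId = workerId;
        this.datacenterId = datacenterId;
    }

    public synchronized long nextId() {
        long timestamp = System.currentTimeMillis();
        if (timestamp == lastTimestamp) {
            // Same millisecond: advance the sequence; on overflow spin until the next millisecond.
            sequence = (sequence + 1) & SEQUENCE_MASK;
            if (sequence == 0) {
                while ((timestamp = System.currentTimeMillis()) <= lastTimestamp) {
                    // busy-wait for the next millisecond
                }
            }
        } else {
            sequence = 0L;
        }
        lastTimestamp = timestamp;
        return ((timestamp - EPOCH) << TIMESTAMP_SHIFT)
                | (datacenterId << DATACENTER_ID_SHIFT)
                | (workerId << WORKER_ID_SHIFT)
                | sequence;
    }
}

With this layout the generated id stays well within 60 bits, so shifting it left by 4 in buildOrderId and appending userId & 15 still yields a positive 64-bit long.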
Summary
Data sharding:
Supports database sharding plus table sharding;
Supports sharding on =, BETWEEN, and IN conditions, and multiple sharding keys can be combined;
Supports complex queries with aggregation, grouping, ordering, paging, and joins;
Flexible sharding rules, including inline expressions;
Hint-based forced routing;
Supports distributed primary keys.
Read/write splitting:
Supports one master with multiple slaves;
Can be combined with database and table sharding;
Supports distributed generation of global primary keys.
Flexible (BASE) transactions:
Best-effort delivery transactions.
Distributed governance:
Supports a configuration center with dynamic configuration changes, plus client-side circuit breaking and failover.