Skip to content

Commit b1ad219

Browse files
committed
1、增加Kingbase插件
2、fix DTStack#301 3、fix DTStack#276 4、fix DTStack#244 5、fix 其他BUG
1 parent 027cb4b commit b1ad219

File tree

334 files changed

+9988
-3538
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

334 files changed

+9988
-3538
lines changed

.gitignore

+2-1
Original file line numberDiff line numberDiff line change
@@ -10,4 +10,5 @@ nohup.out
1010
flinkconf/
1111
hadoopconf/
1212
/default_task_id_output
13-
/syncplugins
13+
/syncplugins
14+
/ci/

README.md

+5-3
Original file line numberDiff line numberDiff line change
@@ -66,12 +66,10 @@ The following databases are currently supported:
6666
| | Hive | | [doc](docs/offline/writer/hivewriter.md) |
6767
| Stream Synchronization | Kafka | [doc](docs/realTime/reader/kafkareader.md) | [doc](docs/realTime/writer/kafkawriter.md) |
6868
| | EMQX | [doc](docs/realTime/reader/emqxreader.md) | [doc](docs/realTime/writer/emqxwriter.md) |
69-
| | RestApi | [doc](docs/realTime/reader/restapireader.md) | [doc](docs/realTime/writer/restapiwriter.md) |
69+
| | RestApi | | [doc](docs/realTime/writer/restapiwriter.md) |
7070
| | MySQL Binlog | [doc](docs/realTime/reader/binlogreader.md) | |
7171
| | MongoDB Oplog | [doc](docs/realTime/reader/mongodboplogreader.md)| |
7272
| | PostgreSQL WAL | [doc](docs/realTime/reader/pgwalreader.md) | |
73-
| | Oracle Logminer| Coming Soon | |
74-
| | SqlServer CDC | Coming Soon | |
7573

7674
# Quick Start
7775

@@ -89,6 +87,10 @@ Please click [Statistics Metric](docs/statistics.md)
8987

9088
Please click [Kerberos](docs/kerberos.md)
9189

90+
# Questions
91+
92+
Please click [Questions](docs/questions.md)
93+
9294
# How to contribute FlinkX
9395

9496
Please click [Contribution](docs/contribution.md)

README_CH.md

+17-4
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,18 @@ FlinkX
77

88
# 技术交流
99

10-
- 招聘**大数据平台开发工程师**,想了解岗位详细信息可以添加本人微信号ysqwhiletrue,注明招聘,如有意者发送简历至[[email protected]](mailto:[email protected])
10+
- 招聘**Flink研发工程师**,如果有兴趣可以联系思枢(微信号:ysqwhiletrue)<BR>
11+
Flink开发工程师JD要求:<BR>
12+
1.负责袋鼠云基于Flink的衍生框架数据同步flinkx和实时计算flinkstreamsql框架的开发;<BR>
13+
2.调研和把握当前最新大数据实时计算技术,将其中的合适技术引入到平台中,改善产品,提升竞争力;<BR>
14+
职位要求:<BR>
15+
1、本科及以上学历,3年及以上的Flink开发经验,精通Java,熟悉Scala、Python优先考虑;<BR>
16+
2、熟悉Flink原理,有基于Flink做过二次源码的开发,在GitHub上贡献过Flink源码者优先;<BR>
17+
3、有机器学习、数据挖掘相关经验者优先;<BR>
18+
4、对新技术有快速学习和上手能力,对代码有一定的洁癖;<BR>
19+
加分项:<BR>
20+
1.在GitHub或其他平台上有过开源项目<BR>
21+
可以添加本人微信号ysqwhiletrue,注明招聘,如有意者发送简历至[[email protected]](mailto:[email protected])
1122

1223
- 我们使用[钉钉](https://www.dingtalk.com/)沟通交流,可以搜索群号[**30537511**]或者扫描下面的二维码进入钉钉群
1324

@@ -66,12 +77,10 @@ FlinkX目前支持下面这些数据库:
6677
| | Hive | | [doc](docs/offline/writer/hivewriter.md) |
6778
| Stream Synchronization | Kafka | [doc](docs/realTime/reader/kafkareader.md) | [doc](docs/realTime/writer/kafkawriter.md) |
6879
| | EMQX | [doc](docs/realTime/reader/emqxreader.md) | [doc](docs/realTime/writer/emqxwriter.md) |
69-
| | RestApi | [doc](docs/realTime/reader/restapireader.md) | [doc](docs/realTime/writer/restapiwriter.md) |
80+
| | RestApi | | [doc](docs/realTime/writer/restapiwriter.md) |
7081
| | MySQL Binlog | [doc](docs/realTime/reader/binlogreader.md) | |
7182
| | MongoDB Oplog | [doc](docs/realTime/reader/mongodboplogreader.md)| |
7283
| | PostgreSQL WAL | [doc](docs/realTime/reader/pgwalreader.md) | |
73-
| | Oracle Logminer| Coming Soon | |
74-
| | SqlServer CDC | Coming Soon | |
7584

7685
# 快速开始
7786

@@ -89,6 +98,10 @@ FlinkX目前支持下面这些数据库:
8998

9099
请点击[Kerberos](docs/kerberos.md)
91100

101+
# Questions
102+
103+
请点击[Questions](docs/questions.md)
104+
92105
# 如何贡献FlinkX
93106

94107
请点击[如何贡献FlinkX](docs/contribution.md)

bin/install_jars.bat

+3
Original file line numberDiff line numberDiff line change
@@ -6,3 +6,6 @@ call mvn install:install-file -DgroupId=com.esen.jdbc -DartifactId=gbase -Dversi
66

77
call mvn install:install-file -DgroupId=dm.jdbc.driver -DartifactId=dm7 -Dversion=18.0.0 -Dpackaging=jar -Dfile=../jars/Dm7JdbcDriver18.jar
88

9+
call mvn install:install-file -DgroupId=com.kingbase8 -DartifactId=kingbase8 -Dversion=8.2.0 -Dpackaging=jar -Dfile=../jars/kingbase8-8.2.0.jar
10+
11+
call mvn install:install-file -DgroupId=fakepath -DartifactId=vertica-jdbc -Dversion=9.1.1-0 -Dpackaging=jar -Dfile=../jars/vertica-jdbc-9.1.1-0.jar

bin/install_jars.sh

+7-1
Original file line numberDiff line numberDiff line change
@@ -10,4 +10,10 @@ mvn install:install-file -DgroupId=com.github.noraui -DartifactId=ojdbc8 -Dversi
1010
mvn install:install-file -DgroupId=com.esen.jdbc -DartifactId=gbase -Dversion=8.3.81.53 -Dpackaging=jar -Dfile=../jars/gbase-8.3.81.53.jar
1111

1212
## dm driver
13-
mvn install:install-file -DgroupId=dm.jdbc.driver -DartifactId=dm7 -Dversion=18.0.0 -Dpackaging=jar -Dfile=../jars/Dm7JdbcDriver18.jar
13+
mvn install:install-file -DgroupId=dm.jdbc.driver -DartifactId=dm7 -Dversion=18.0.0 -Dpackaging=jar -Dfile=../jars/Dm7JdbcDriver18.jar
14+
15+
## kingbase driver
16+
mvn install:install-file -DgroupId=com.kingbase8 -DartifactId=kingbase8 -Dversion=8.2.0 -Dpackaging=jar -Dfile=../jars/kingbase8-8.2.0.jar
17+
18+
## vertica driver
19+
mvn install:install-file -DgroupId=fakepath -DartifactId=vertica-jdbc -Dversion=9.1.1-0 -Dpackaging=jar -Dfile=../jars/vertica-jdbc-9.1.1-0.jar

docs/example/kafka09_stream.json

+36
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
{
2+
"job": {
3+
"content": [
4+
{
5+
"reader": {
6+
"parameter": {
7+
"topic": "kafka09",
8+
"groupId": "default",
9+
"codec": "text",
10+
"encoding": "UTF-8",
11+
"blankIgnore": false,
12+
"consumerSettings": {
13+
"zookeeper.connect": "localhost:2181/kafka09"
14+
}
15+
},
16+
"name": "kafka09reader"
17+
},
18+
"writer": {
19+
"parameter": {
20+
"print": true
21+
},
22+
"name": "streamwriter"
23+
}
24+
}
25+
],
26+
"setting": {
27+
"restore": {
28+
"isRestore": false,
29+
"isStream": true
30+
},
31+
"speed": {
32+
"channel": 1
33+
}
34+
}
35+
}
36+
}

docs/example/kafka10_stream.json

+42
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
{
2+
"job": {
3+
"content": [
4+
{
5+
"reader": {
6+
"parameter": {
7+
"topic": "kafka10",
8+
"groupId": "default",
9+
"codec": "text",
10+
"encoding": "UTF-8",
11+
"blankIgnore": false,
12+
"consumerSettings": {
13+
"bootstrap.servers": "localhost:9092"
14+
}
15+
},
16+
"name": "kafka10reader"
17+
},
18+
"writer": {
19+
"parameter": {
20+
"print": true
21+
},
22+
"name": "streamwriter"
23+
}
24+
}
25+
],
26+
"writer": {
27+
"parameter": {
28+
"print": true
29+
},
30+
"name": "streamwriter"
31+
}
32+
},
33+
"setting": {
34+
"restore": {
35+
"isRestore": false,
36+
"isStream": true
37+
},
38+
"speed": {
39+
"channel": 1
40+
}
41+
}
42+
}

docs/example/kafka11_stream.json

+36
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
{
2+
"job": {
3+
"content": [
4+
{
5+
"reader": {
6+
"parameter": {
7+
"topic": "kafka11",
8+
"groupId": "default",
9+
"codec": "text",
10+
"encoding": "UTF-8",
11+
"blankIgnore": false,
12+
"consumerSettings": {
13+
"bootstrap.servers": "localhost:9092"
14+
}
15+
},
16+
"name": "kafka11reader"
17+
},
18+
"writer": {
19+
"parameter": {
20+
"print": true
21+
},
22+
"name": "streamwriter"
23+
}
24+
}
25+
],
26+
"setting": {
27+
"restore": {
28+
"isRestore": false,
29+
"isStream": true
30+
},
31+
"speed": {
32+
"channel": 1
33+
}
34+
}
35+
}
36+
}

docs/example/kafka_hive.json

+52
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
{
2+
"job": {
3+
"content": [
4+
{
5+
"reader" : {
6+
"parameter" : {
7+
"topic" : "test",
8+
"mode": "timestamp",
9+
"timestamp": 1609812275000,
10+
"codec": "text",
11+
"consumerSettings" : {
12+
"bootstrap.servers" : "ip1:9092,ip2:9092,ip3:9092"
13+
}
14+
},
15+
"name" : "kafkareader"
16+
},
17+
"writer": {
18+
"parameter" : {
19+
"jdbcUrl" : "jdbc:hive2://ip:10000/test",
20+
"fileType" : "parquet",
21+
"writeMode" : "overwrite",
22+
"compress" : "",
23+
"charsetName" : "UTF-8",
24+
"maxFileSize" : 1073741824,
25+
"tablesColumn" : "{\"message\":[{\"part\":false,\"comment\":\"\",\"type\":\"string\",\"key\":\"message\"}]}",
26+
"partition" : "pt",
27+
"partitionType" : "DAY",
28+
"defaultFS" : "hdfs://ns",
29+
"hadoopConfig": {
30+
"dfs.ha.namenodes.ns": "nn1,nn2",
31+
"dfs.namenode.rpc-address.ns.nn2": "ip1:9000",
32+
"dfs.client.failover.proxy.provider.ns": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
33+
"dfs.namenode.rpc-address.ns.nn1": "ip2:9000",
34+
"dfs.nameservices": "ns"
35+
}
36+
},
37+
"name" : "hivewriter"
38+
}
39+
}
40+
],
41+
"setting": {
42+
"restore": {
43+
"isRestore": true,
44+
"isStream": true
45+
},
46+
"speed": {
47+
"readerChannel": 3,
48+
"writerChannel": 1
49+
}
50+
}
51+
}
52+
}

docs/example/kafka_mysql.json

+65
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
{
2+
"job": {
3+
"content": [
4+
{
5+
"reader": {
6+
"parameter": {
7+
"topic": "tudou",
8+
"mode": "timestamp",
9+
"timestamp": 1609812275000,
10+
"offset": "partition:0,offset:0;partition:1,offset:1;partition:2,offset:2",
11+
"codec": "text",
12+
"blankIgnore": false,
13+
"consumerSettings": {
14+
"bootstrap.servers": "ip1:9092,ip2:9092,ip3:9092"
15+
},
16+
"column": ["id","user_id","name"]
17+
},
18+
"name": "kafkareader"
19+
},
20+
"writer": {
21+
"name": "mysqlwriter",
22+
"parameter": {
23+
"username": "root",
24+
"password": "abc123",
25+
"connection": [
26+
{
27+
"jdbcUrl": "jdbc:mysql://localhost:3306/test",
28+
"table": [
29+
"test"
30+
]
31+
}
32+
],
33+
"preSql": ["truncate table test;"],
34+
"postSql": [],
35+
"writeMode": "insert",
36+
"column": [
37+
{
38+
"name": "id",
39+
"type": "BIGINT"
40+
},
41+
{
42+
"name": "user_id",
43+
"type": "BIGINT"
44+
},
45+
{
46+
"name": "name",
47+
"type": "varchar"
48+
}
49+
]
50+
}
51+
}
52+
}
53+
],
54+
"setting": {
55+
"restore": {
56+
"isRestore": true,
57+
"isStream": true
58+
},
59+
"speed": {
60+
"readerChannel": 3,
61+
"writerChannel": 1
62+
}
63+
}
64+
}
65+
}

docs/example/kafka_stream.json

+34
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
{
2+
"job": {
3+
"content": [
4+
{
5+
"reader": {
6+
"parameter": {
7+
"topic": "test",
8+
"mode": "timestamp",
9+
"timestamp": 1609812275000,
10+
"offset": "partition:0,offset:0;partition:1,offset:1;partition:2,offset:2",
11+
"codec": "text",
12+
"blankIgnore": false,
13+
"consumerSettings": {
14+
"bootstrap.servers": "ip1:9092,ip2:9092,ip3:9092"
15+
}
16+
},
17+
"name": "kafkareader"
18+
},
19+
"writer": {
20+
"parameter": {
21+
"print": true
22+
},
23+
"name": "streamwriter"
24+
}
25+
}
26+
],
27+
"setting": {
28+
"speed": {
29+
"readerChannel": 3,
30+
"writerChannel": 1
31+
}
32+
}
33+
}
34+
}

0 commit comments

Comments
 (0)