This repository was archived by the owner on Nov 27, 2023. It is now read-only.
forked from confluentinc/kafka-connect-jdbc
Commit
Merge remote-tracking branch 'jdbc-sink/master' into jdbc-sink-merge
One hitch: had to disable `reuseForks` for tests in pom.xml because of static mocking of DriverManager in the source connector tests
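
Disabling fork reuse gives each test class a fresh JVM, so state installed by statically mocking DriverManager cannot leak into unrelated tests. For reference, such a surefire change typically looks like the following in pom.xml; this is a hedged sketch, as the actual plugin block in this repository's pom is not shown in this diff:

<!-- Hypothetical maven-surefire-plugin configuration; version and placement are assumptions. -->
<plugin>
  <groupId>org.apache.maven.plugins</groupId>
  <artifactId>maven-surefire-plugin</artifactId>
  <configuration>
    <!-- Run each test class in a fresh forked JVM so statically mocked
         classes such as DriverManager do not leak between tests. -->
    <reuseForks>false</reuseForks>
  </configuration>
</plugin>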
Showing 37 changed files with 4,290 additions and 3 deletions.
src/main/java/io/confluent/connect/jdbc/JdbcSinkConnector.java (77 additions, 0 deletions)
@@ -0,0 +1,77 @@
/*
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.confluent.connect.jdbc;

import org.apache.kafka.common.config.Config;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.sink.SinkConnector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import io.confluent.connect.jdbc.sink.JdbcSinkConfig;
import io.confluent.connect.jdbc.sink.JdbcSinkTask;

public final class JdbcSinkConnector extends SinkConnector {
  private static final Logger log = LoggerFactory.getLogger(JdbcSinkConnector.class);

  private Map<String, String> configProps;

  @Override
  public Class<? extends Task> taskClass() {
    return JdbcSinkTask.class;
  }

  @Override
  public List<Map<String, String>> taskConfigs(int maxTasks) {
    log.info("Setting task configurations for {} workers.", maxTasks);
    // Every task receives an identical copy of the connector configuration.
    final List<Map<String, String>> configs = new ArrayList<>(maxTasks);
    for (int i = 0; i < maxTasks; ++i) {
      configs.add(configProps);
    }
    return configs;
  }

  @Override
  public void start(Map<String, String> props) {
    configProps = props;
  }

  @Override
  public void stop() {
  }

  @Override
  public ConfigDef config() {
    return JdbcSinkConfig.CONFIG_DEF;
  }

  @Override
  public Config validate(Map<String, String> connectorConfigs) {
    // TODO cross-field validation here: pkFields against the pkMode
    return super.validate(connectorConfigs);
  }

  @Override
  public String version() {
    // TODO switch over to whatever connector-jdbc is doing
    return getClass().getPackage().getImplementationVersion();
  }
}
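
Since Kafka Connect sink tasks share work through the consumer group, taskConfigs simply hands every task the same property map. A hypothetical sketch of the lifecycle calls the framework makes (the property values below are illustrative, not taken from this commit):

// Hypothetical driver; in production the Connect framework performs these calls.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import io.confluent.connect.jdbc.JdbcSinkConnector;

public class ConnectorLifecycleSketch {
  public static void main(String[] args) {
    final JdbcSinkConnector connector = new JdbcSinkConnector();

    final Map<String, String> props = new HashMap<>();
    props.put("connection.url", "jdbc:postgresql://localhost/test"); // illustrative value
    connector.start(props);

    // All three task configs are the same map: tasks divide work by
    // partition assignment, not by differing configuration.
    final List<Map<String, String>> taskConfigs = connector.taskConfigs(3);
    System.out.println("task configs: " + taskConfigs.size());

    connector.stop();
  }
}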
src/main/java/io/confluent/connect/jdbc/sink/BufferedRecords.java (131 additions, 0 deletions)
@@ -0,0 +1,131 @@
/*
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.confluent.connect.jdbc.sink;

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import io.confluent.connect.jdbc.sink.dialect.DbDialect;
import io.confluent.connect.jdbc.sink.metadata.FieldsMetadata;
import io.confluent.connect.jdbc.sink.metadata.SchemaPair;

public class BufferedRecords {
  private static final Logger log = LoggerFactory.getLogger(BufferedRecords.class);

  private final String tableName;
  private final JdbcSinkConfig config;
  private final DbDialect dbDialect;
  private final DbStructure dbStructure;
  private final Connection connection;

  private List<SinkRecord> records = new ArrayList<>();
  private SchemaPair currentSchemaPair;
  private FieldsMetadata fieldsMetadata;
  private PreparedStatement preparedStatement;
  private PreparedStatementBinder preparedStatementBinder;

  public BufferedRecords(JdbcSinkConfig config, String tableName, DbDialect dbDialect, DbStructure dbStructure, Connection connection) {
    this.tableName = tableName;
    this.config = config;
    this.dbDialect = dbDialect;
    this.dbStructure = dbStructure;
    this.connection = connection;
  }

  public List<SinkRecord> add(SinkRecord record) throws SQLException {
    final SchemaPair schemaPair = new SchemaPair(record.keySchema(), record.valueSchema());

    if (currentSchemaPair == null) {
      currentSchemaPair = schemaPair;
      // Re-initialize everything that depends on the record schema
      fieldsMetadata = FieldsMetadata.extract(tableName, config.pkMode, config.pkFields, currentSchemaPair);
      dbStructure.createOrAmendIfNecessary(config, connection, tableName, fieldsMetadata);
      final String insertSql = getInsertSql();
      log.debug("{} sql: {}", config.insertMode, insertSql);
      preparedStatement = connection.prepareStatement(insertSql);
      preparedStatementBinder = new PreparedStatementBinder(preparedStatement, config.pkMode, schemaPair, fieldsMetadata);
    }

    final List<SinkRecord> flushed;
    if (currentSchemaPair.equals(schemaPair)) {
      // Continue with the current batch state
      records.add(record);
      if (records.size() >= config.batchSize) {
        flushed = flush();
      } else {
        flushed = Collections.emptyList();
      }
    } else {
      // Each batch needs to have the same SchemaPair, so get the buffered records out, reset the state and re-attempt the add
      flushed = flush();
      currentSchemaPair = null;
      flushed.addAll(add(record));
    }
    return flushed;
  }

  public List<SinkRecord> flush() throws SQLException {
    if (records.isEmpty()) {
      // Return a mutable list, since callers may append to the result
      return new ArrayList<>();
    }
    for (SinkRecord record : records) {
      preparedStatementBinder.bindRecord(record);
    }
    int totalUpdateCount = 0;
    for (int updateCount : preparedStatement.executeBatch()) {
      totalUpdateCount += updateCount;
    }
    if (totalUpdateCount != records.size()) {
      switch (config.insertMode) {
        case INSERT:
          throw new ConnectException(String.format("Update count (%d) did not sum up to the total number of records inserted (%d)",
                                                   totalUpdateCount, records.size()));
        case UPSERT:
          // Upsert update counts can legitimately differ (e.g. some drivers report 2 per updated row), so only trace-log
          log.trace("Upserted records:{} resulting in totalUpdateCount:{}", records.size(), totalUpdateCount);
      }
    }

    final List<SinkRecord> flushedRecords = records;
    records = new ArrayList<>();
    return flushedRecords;
  }

  private String getInsertSql() {
    switch (config.insertMode) {
      case INSERT:
        return dbDialect.getInsert(tableName, fieldsMetadata.keyFieldNames, fieldsMetadata.nonKeyFieldNames);
      case UPSERT:
        if (fieldsMetadata.keyFieldNames.isEmpty()) {
          throw new ConnectException(String.format(
              "Write to table '%s' in UPSERT mode requires key field names to be known, check the primary key configuration", tableName
          ));
        }
        return dbDialect.getUpsertQuery(tableName, fieldsMetadata.keyFieldNames, fieldsMetadata.nonKeyFieldNames);
      default:
        throw new ConnectException("Invalid insert mode");
    }
  }
}
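
To make the batching contract concrete, here is a hypothetical caller of BufferedRecords: records accumulate until the batch size is hit or the schema pair changes, and add() returns whatever got flushed as a side effect. In the real connector this logic would live in the sink task's writer; the table name and the helper signature below are assumptions:

// Hypothetical usage sketch; config, dialect, structure and connection come from the caller.
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collection;

import org.apache.kafka.connect.sink.SinkRecord;

import io.confluent.connect.jdbc.sink.BufferedRecords;
import io.confluent.connect.jdbc.sink.DbStructure;
import io.confluent.connect.jdbc.sink.JdbcSinkConfig;
import io.confluent.connect.jdbc.sink.dialect.DbDialect;

class BufferedRecordsUsageSketch {
  static void writeAll(JdbcSinkConfig config, DbDialect dialect, DbStructure structure,
                       Connection connection, Collection<SinkRecord> records) throws SQLException {
    // "orders" is an illustrative table name.
    final BufferedRecords buffer = new BufferedRecords(config, "orders", dialect, structure, connection);
    for (SinkRecord record : records) {
      // add() flushes implicitly when the batch fills up or the schema pair changes.
      buffer.add(record);
    }
    // Push out whatever is still buffered before offsets are committed.
    buffer.flush();
  }
}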