Commit 9663144d authored by gaogao110

update Phoenix connector

parent adc02868
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix;
import org.apache.flink.annotation.Experimental;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.connector.phoenix.internal.JdbcBatchingOutputFormat;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.SimpleJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.types.Row;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.function.Function;
import static org.apache.flink.connector.phoenix.utils.JdbcUtils.setRecordToStatement;
/**
* OutputFormat to write Rows into a JDBC database. The OutputFormat has to be configured using the
* supplied OutputFormatBuilder.
*/
@Experimental
public class JdbcOutputFormat
extends JdbcBatchingOutputFormat<Row, Row, JdbcBatchStatementExecutor<Row>> {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(JdbcOutputFormat.class);
private JdbcOutputFormat(
JdbcConnectionProvider connectionProvider,
String sql,
int[] typesArray,
int batchSize) {
super(
connectionProvider,
new JdbcExecutionOptions.Builder().withBatchSize(batchSize).build(),
ctx -> createRowExecutor(sql, typesArray, ctx),
RecordExtractor.identity());
}
private static JdbcBatchStatementExecutor<Row> createRowExecutor(
String sql, int[] typesArray, RuntimeContext ctx) {
JdbcStatementBuilder<Row> statementBuilder =
(st, record) -> setRecordToStatement(st, typesArray, record);
return JdbcBatchStatementExecutor.simple(
sql,
statementBuilder,
ctx.getExecutionConfig().isObjectReuseEnabled() ? Row::copy : Function.identity());
}
public static JdbcOutputFormatBuilder buildJdbcOutputFormat() {
return new JdbcOutputFormatBuilder();
}
/** Builder for {@link JdbcOutputFormat}. */
public static class JdbcOutputFormatBuilder {
private String username;
private String password;
private String drivername;
private String dbURL;
private String query;
private int batchSize = JdbcExecutionOptions.DEFAULT_SIZE;
private int[] typesArray;
private JdbcOutputFormatBuilder() {}
public JdbcOutputFormatBuilder setUsername(String username) {
this.username = username;
return this;
}
public JdbcOutputFormatBuilder setPassword(String password) {
this.password = password;
return this;
}
public JdbcOutputFormatBuilder setDrivername(String drivername) {
this.drivername = drivername;
return this;
}
public JdbcOutputFormatBuilder setDBUrl(String dbURL) {
this.dbURL = dbURL;
return this;
}
public JdbcOutputFormatBuilder setQuery(String query) {
this.query = query;
return this;
}
public JdbcOutputFormatBuilder setBatchSize(int batchSize) {
this.batchSize = batchSize;
return this;
}
public JdbcOutputFormatBuilder setSqlTypes(int[] typesArray) {
this.typesArray = typesArray;
return this;
}
/**
* Finalizes the configuration and checks validity.
*
* @return Configured JdbcOutputFormat
*/
public JdbcOutputFormat finish() {
return new JdbcOutputFormat(
new SimpleJdbcConnectionProvider(buildConnectionOptions()),
query,
typesArray,
batchSize);
}
public JdbcConnectionOptions buildConnectionOptions() {
if (this.username == null) {
LOG.info("Username was not supplied.");
}
if (this.password == null) {
LOG.info("Password was not supplied.");
}
return new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
.withUrl(dbURL)
.withDriverName(drivername)
.withUsername(username)
.withPassword(password)
.build();
}
}
}
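/*
 * A minimal usage sketch of the builder above. The Phoenix JDBC URL, driver class,
 * upsert query, and SQL types below are illustrative assumptions, not taken from this commit:
 *
 *   JdbcOutputFormat format = JdbcOutputFormat.buildJdbcOutputFormat()
 *           .setDrivername("org.apache.phoenix.jdbc.PhoenixDriver")
 *           .setDBUrl("jdbc:phoenix:zk-host:2181")
 *           .setQuery("UPSERT INTO MY_TABLE (ID, NAME) VALUES (?, ?)")
 *           .setSqlTypes(new int[] {java.sql.Types.INTEGER, java.sql.Types.VARCHAR})
 *           .setBatchSize(500)
 *           .finish();
 */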
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.connector.phoenix.internal.GenericJdbcSinkFunction;
import org.apache.flink.connector.phoenix.internal.JdbcBatchingOutputFormat;
import org.apache.flink.connector.phoenix.internal.connection.SimpleJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.util.Preconditions;
import javax.sql.XADataSource;
import java.util.function.Function;
/** Facade to create JDBC {@link SinkFunction sinks}. */
@PublicEvolving
public class JdbcSink {
/**
* Create a JDBC sink with the default {@link JdbcExecutionOptions}.
*
* @see #sink(String, JdbcStatementBuilder, JdbcExecutionOptions, JdbcConnectionOptions)
*/
public static <T> SinkFunction<T> sink(
String sql,
JdbcStatementBuilder<T> statementBuilder,
JdbcConnectionOptions connectionOptions) {
return sink(sql, statementBuilder, JdbcExecutionOptions.defaults(), connectionOptions);
}
/**
* Create a JDBC sink.
*
* <p>Note: the objects passed to the returned sink can be processed in batch and retried.
* Therefore, objects can not be {@link
* org.apache.flink.api.common.ExecutionConfig#enableObjectReuse() reused}.
*
* @param sql arbitrary DML query (e.g. insert, update, upsert)
* @param statementBuilder sets parameters on {@link java.sql.PreparedStatement} according to
* the query
* @param <T> type of data in {@link
* org.apache.flink.streaming.runtime.streamrecord.StreamRecord StreamRecord}.
* @param executionOptions parameters of execution, such as batch size and maximum retries
* @param connectionOptions parameters of connection, such as JDBC URL
*/
public static <T> SinkFunction<T> sink(
String sql,
JdbcStatementBuilder<T> statementBuilder,
JdbcExecutionOptions executionOptions,
JdbcConnectionOptions connectionOptions) {
return new GenericJdbcSinkFunction<>(
new JdbcBatchingOutputFormat<>(
new SimpleJdbcConnectionProvider(connectionOptions),
executionOptions,
context -> {
Preconditions.checkState(
!context.getExecutionConfig().isObjectReuseEnabled(),
"objects can not be reused with JDBC sink function");
return JdbcBatchStatementExecutor.simple(
sql, statementBuilder, Function.identity());
},
JdbcBatchingOutputFormat.RecordExtractor.identity()));
}
/**
* Create JDBC sink which provides exactly-once guarantee.
*
* <p>Note: the objects passed to the returned sink can be processed in batch and retried.
* Therefore, objects can not be {@link
* org.apache.flink.api.common.ExecutionConfig#enableObjectReuse() reused}.
*
* @param sql arbitrary DML query (e.g. insert, update, upsert)
* @param statementBuilder sets parameters on {@link java.sql.PreparedStatement} according to
* the query
* @param <T> type of data in {@link
* org.apache.flink.streaming.runtime.streamrecord.StreamRecord StreamRecord}.
* @param executionOptions parameters of execution, such as batch size and maximum retries
* @param exactlyOnceOptions exactly-once options
* @param dataSourceSupplier supplies the {@link XADataSource}
*/
/*
public static <T> SinkFunction<T> exactlyOnceSink(
String sql,
JdbcStatementBuilder<T> statementBuilder,
JdbcExecutionOptions executionOptions,
JdbcExactlyOnceOptions exactlyOnceOptions,
SerializableSupplier<XADataSource> dataSourceSupplier) {
return new JdbcXaSinkFunction<>(
sql,
statementBuilder,
XaFacade.fromXaDataSourceSupplier(
dataSourceSupplier,
exactlyOnceOptions.getTimeoutSec(),
exactlyOnceOptions.isTransactionPerConnection()),
executionOptions,
exactlyOnceOptions);
}
*/
private JdbcSink() {}
}
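/*
 * A minimal sketch of wiring the facade above into a DataStream job. The "Order" POJO,
 * URL, and parameter mapping are assumptions used only for illustration:
 *
 *   DataStream<Order> orders = ...;
 *   orders.addSink(
 *           JdbcSink.sink(
 *                   "UPSERT INTO ORDERS (ID, AMOUNT) VALUES (?, ?)",
 *                   (ps, order) -> {
 *                       ps.setLong(1, order.id);
 *                       ps.setBigDecimal(2, order.amount);
 *                   },
 *                   new JdbcExecutionOptions.Builder().withBatchSize(200).build(),
 *                   new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
 *                           .withUrl("jdbc:phoenix:zk-host:2181")
 *                           .withDriverName("org.apache.phoenix.jdbc.PhoenixDriver")
 *                           .build()));
 */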
......@@ -93,11 +93,11 @@ import java.util.Arrays;
* @see DriverManager
*/
@Experimental
public class JdbcInputFormat extends RichInputFormat<Row, InputSplit>
public class PhoenixInputFormat extends RichInputFormat<Row, InputSplit>
implements ResultTypeQueryable<Row> {
protected static final long serialVersionUID = 2L;
protected static final Logger LOG = LoggerFactory.getLogger(JdbcInputFormat.class);
protected static final Logger LOG = LoggerFactory.getLogger(PhoenixInputFormat.class);
protected JdbcConnectionProvider connectionProvider;
protected String queryTemplate;
......@@ -118,7 +118,7 @@ public class JdbcInputFormat extends RichInputFormat<Row, InputSplit>
protected boolean hasNext;
protected Object[][] parameterValues;
public JdbcInputFormat() {
public PhoenixInputFormat() {
}
@Override
......@@ -339,73 +339,73 @@ public class JdbcInputFormat extends RichInputFormat<Row, InputSplit>
*
* @return builder
*/
public static JdbcInputFormatBuilder buildJdbcInputFormat() {
return new JdbcInputFormatBuilder();
public static PhoenixInputFormatBuilder buildJdbcInputFormat() {
return new PhoenixInputFormatBuilder();
}
/**
* Builder for {@link JdbcInputFormat}.
* Builder for {@link PhoenixInputFormat}.
*/
public static class JdbcInputFormatBuilder {
public static class PhoenixInputFormatBuilder {
private final JdbcConnectionOptions.JdbcConnectionOptionsBuilder connOptionsBuilder;
private final JdbcInputFormat format;
private final PhoenixInputFormat format;
public JdbcInputFormatBuilder() {
public PhoenixInputFormatBuilder() {
//this.connOptionsBuilder = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder();
this.connOptionsBuilder = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder();
this.format = new JdbcInputFormat();
this.format = new PhoenixInputFormat();
// using TYPE_FORWARD_ONLY for high performance reads
this.format.resultSetType = ResultSet.TYPE_FORWARD_ONLY;
this.format.resultSetConcurrency = ResultSet.CONCUR_READ_ONLY;
}
public JdbcInputFormatBuilder setUsername(String username) {
public PhoenixInputFormatBuilder setUsername(String username) {
connOptionsBuilder.withUsername(username);
return this;
}
public JdbcInputFormatBuilder setPassword(String password) {
public PhoenixInputFormatBuilder setPassword(String password) {
connOptionsBuilder.withPassword(password);
return this;
}
public JdbcInputFormatBuilder setDrivername(String drivername) {
public PhoenixInputFormatBuilder setDrivername(String drivername) {
connOptionsBuilder.withDriverName(drivername);
return this;
}
public JdbcInputFormatBuilder setDBUrl(String dbURL) {
public PhoenixInputFormatBuilder setDBUrl(String dbURL) {
connOptionsBuilder.withUrl(dbURL);
return this;
}
public JdbcInputFormatBuilder setQuery(String query) {
public PhoenixInputFormatBuilder setQuery(String query) {
format.queryTemplate = query;
return this;
}
public JdbcInputFormatBuilder setResultSetType(int resultSetType) {
public PhoenixInputFormatBuilder setResultSetType(int resultSetType) {
format.resultSetType = resultSetType;
return this;
}
public JdbcInputFormatBuilder setResultSetConcurrency(int resultSetConcurrency) {
public PhoenixInputFormatBuilder setResultSetConcurrency(int resultSetConcurrency) {
format.resultSetConcurrency = resultSetConcurrency;
return this;
}
public JdbcInputFormatBuilder setParametersProvider(
public PhoenixInputFormatBuilder setParametersProvider(
JdbcParameterValuesProvider parameterValuesProvider) {
format.parameterValues = parameterValuesProvider.getParameterValues();
return this;
}
public JdbcInputFormatBuilder setRowTypeInfo(RowTypeInfo rowTypeInfo) {
public PhoenixInputFormatBuilder setRowTypeInfo(RowTypeInfo rowTypeInfo) {
format.rowTypeInfo = rowTypeInfo;
return this;
}
public JdbcInputFormatBuilder setFetchSize(int fetchSize) {
public PhoenixInputFormatBuilder setFetchSize(int fetchSize) {
Preconditions.checkArgument(
fetchSize == Integer.MIN_VALUE || fetchSize > 0,
"Illegal value %s for fetchSize, has to be positive or Integer.MIN_VALUE.",
......@@ -414,23 +414,23 @@ public class JdbcInputFormat extends RichInputFormat<Row, InputSplit>
return this;
}
public JdbcInputFormatBuilder setAutoCommit(Boolean autoCommit) {
public PhoenixInputFormatBuilder setAutoCommit(Boolean autoCommit) {
format.autoCommit = autoCommit;
return this;
}
public JdbcInputFormatBuilder setNamespaceMappingEnabled(Boolean namespaceMappingEnabled) {
public PhoenixInputFormatBuilder setNamespaceMappingEnabled(Boolean namespaceMappingEnabled) {
format.namespaceMappingEnabled = namespaceMappingEnabled;
return this;
}
public JdbcInputFormatBuilder setMapSystemTablesEnabled(Boolean mapSystemTablesEnabled) {
public PhoenixInputFormatBuilder setMapSystemTablesEnabled(Boolean mapSystemTablesEnabled) {
format.mapSystemTablesEnabled = mapSystemTablesEnabled;
return this;
}
public JdbcInputFormat finish() {
public PhoenixInputFormat finish() {
format.connectionProvider =
//new SimpleJdbcConnectionProvider(connOptionsBuilder.build());
new PhoneixJdbcConnectionProvider(connOptionsBuilder.build(), format.namespaceMappingEnabled, format.mapSystemTablesEnabled);
......
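/*
 * A minimal sketch of configuring the PhoenixInputFormat builder above. The query, URL,
 * and row type (Flink's RowTypeInfo/Types) are illustrative assumptions:
 *
 *   PhoenixInputFormat inputFormat = PhoenixInputFormat.buildJdbcInputFormat()
 *           .setDrivername("org.apache.phoenix.jdbc.PhoenixDriver")
 *           .setDBUrl("jdbc:phoenix:zk-host:2181")
 *           .setQuery("SELECT ID, NAME FROM MY_TABLE")
 *           .setRowTypeInfo(new RowTypeInfo(Types.INT, Types.STRING))
 *           .setNamespaceMappingEnabled(true)
 *           .setMapSystemTablesEnabled(true)
 *           .finish();
 */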
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.table.data.RowData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.HashMap;
import org.apache.flink.types.Row;
import static org.apache.flink.connector.phoenix.utils.JdbcUtils.setRecordToStatement;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* A Phoenix {@link RichSinkFunction} that writes {@code Tuple2<Boolean, Row>} records with a
* prepared statement and commits the connection every 1000 records and on close.
*/
@Internal
public class PhoenixSinkFunction extends RichSinkFunction<Tuple2<Boolean, Row>>
implements CheckpointedFunction {
private static final Logger logger = LoggerFactory.getLogger(PhoenixSinkFunction.class);
// created in open(); keep these transient (not static) so that parallel subtasks in the
// same JVM do not share a single connection or statement
private transient Connection connection = null;
private transient PreparedStatement psUp = null;
private int batchcount = 0;
private int totalcount = 0;
private final JdbcOptions jdbcOptions;
private final PhoneixJdbcConnectionProvider connectionProvider;
private String[] fieldNames;
private String[] keyFields;
private int[] fieldTypes;
public PhoenixSinkFunction(JdbcOptions jdbcOptions, PhoneixJdbcConnectionProvider connectionProvider,String[] fieldNames,String[] keyFields,int[] fieldTypes) {
super();
this.jdbcOptions = jdbcOptions;
this.connectionProvider = connectionProvider;
this.fieldNames = fieldNames;
this.keyFields = keyFields;
this.fieldTypes = fieldTypes;
}
@Override
public void open(Configuration parameters) throws Exception {
logger.info("打开连接!!!");
try {
connection = connectionProvider.getOrEstablishConnection();
} catch (Exception e) {
throw new IOException("unable to open JDBC writer", e);
}
checkNotNull(jdbcOptions, "No options supplied.");
checkNotNull(fieldNames, "No fieldNames supplied.");
JdbcDmlOptions dml =
JdbcDmlOptions.builder()
.withTableName(jdbcOptions.getTableName())
.withDialect(jdbcOptions.getDialect())
.withFieldNames(fieldNames)
.withKeyFields(keyFields)
.withFieldTypes(fieldTypes)
.build();
String sql =
FieldNamedPreparedStatementImpl.parseNamedStatement(
jdbcOptions.getDialect()
.getInsertIntoStatement(
dml.getTableName(), dml.getFieldNames()),
new HashMap<>());
psUp = connection.prepareStatement(sql);
logger.info("创建prepareStatement!!! sql: "+sql);
}
@Override
public void invoke(Tuple2<Boolean, Row> value, Context context) throws Exception {
setRecordToStatement(psUp, fieldTypes, value.f1);
psUp.executeUpdate();
batchcount++;
if (batchcount == 1000) {
connection.commit();
batchcount = 0;
}
}
@Override
public void close() throws Exception {
logger.info("关闭连接!!!");
connection.commit();
if (psUp != null ) {
try {
psUp.close();
} catch (SQLException throwables) {
throwables.printStackTrace();
}
}
if (connection != null) {
try {
connection.close();
} catch (SQLException throwables) {
throwables.printStackTrace();
}
}
}
@Override
public void snapshotState(FunctionSnapshotContext functionSnapshotContext) throws Exception {
}
@Override
public void initializeState(FunctionInitializationContext functionInitializationContext) throws Exception {
}
}
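/*
 * Illustrative wiring of the sink function above. The options object, field layout, and
 * upsert stream are assumptions for this sketch, not part of the commit:
 *
 *   JdbcOptions jdbcOptions = ...; // e.g. built via JdbcOptions.builder()
 *   PhoneixJdbcConnectionProvider provider = new PhoneixJdbcConnectionProvider(
 *           new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
 *                   .withUrl("jdbc:phoenix:zk-host:2181")
 *                   .build(),
 *           true, true);
 *   PhoenixSinkFunction sink = new PhoenixSinkFunction(
 *           jdbcOptions,
 *           provider,
 *           new String[] {"ID", "NAME"},                                  // fieldNames
 *           new String[] {"ID"},                                          // keyFields
 *           new int[] {java.sql.Types.INTEGER, java.sql.Types.VARCHAR});  // fieldTypes
 *   upsertStream.addSink(sink); // DataStream<Tuple2<Boolean, Row>>
 */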
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.internal.connection;
import org.apache.flink.connector.phoenix.JdbcConnectionOptions;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.concurrent.NotThreadSafe;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.Driver;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Enumeration;
import java.util.Properties;
/** Simple JDBC connection provider. */
@NotThreadSafe
public class SimpleJdbcConnectionProvider implements JdbcConnectionProvider, Serializable {
private static final Logger LOG = LoggerFactory.getLogger(SimpleJdbcConnectionProvider.class);
private static final long serialVersionUID = 1L;
private final JdbcConnectionOptions jdbcOptions;
private transient Driver loadedDriver;
private transient Connection connection;
static {
// Load DriverManager first to avoid deadlock between DriverManager's
// static initialization block and specific driver class's static
// initialization block when two different driver classes are loading
// concurrently using Class.forName while DriverManager is uninitialized
// before.
//
// This could happen in JDK 8 but not above as driver loading has been
// moved out of DriverManager's static initialization block since JDK 9.
DriverManager.getDrivers();
}
public SimpleJdbcConnectionProvider(JdbcConnectionOptions jdbcOptions) {
this.jdbcOptions = jdbcOptions;
}
@Override
public Connection getConnection() {
return connection;
}
@Override
public boolean isConnectionValid() throws SQLException {
return connection != null
&& connection.isValid(jdbcOptions.getConnectionCheckTimeoutSeconds());
}
private static Driver loadDriver(String driverName)
throws SQLException, ClassNotFoundException {
Preconditions.checkNotNull(driverName);
Enumeration<Driver> drivers = DriverManager.getDrivers();
while (drivers.hasMoreElements()) {
Driver driver = drivers.nextElement();
if (driver.getClass().getName().equals(driverName)) {
return driver;
}
}
// We could reach here for reasons:
// * Class loader hell of DriverManager(see JDK-8146872).
// * driver is not installed as a service provider.
Class<?> clazz =
Class.forName(driverName, true, Thread.currentThread().getContextClassLoader());
try {
return (Driver) clazz.newInstance();
} catch (Exception ex) {
throw new SQLException("Fail to create driver of class " + driverName, ex);
}
}
private Driver getLoadedDriver() throws SQLException, ClassNotFoundException {
if (loadedDriver == null) {
loadedDriver = loadDriver(jdbcOptions.getDriverName());
}
return loadedDriver;
}
@Override
public Connection getOrEstablishConnection() throws SQLException, ClassNotFoundException {
if (connection != null) {
return connection;
}
if (jdbcOptions.getDriverName() == null) {
connection =
DriverManager.getConnection(
jdbcOptions.getDbURL(),
jdbcOptions.getUsername().orElse(null),
jdbcOptions.getPassword().orElse(null));
} else {
Driver driver = getLoadedDriver();
Properties info = new Properties();
jdbcOptions.getUsername().ifPresent(user -> info.setProperty("user", user));
jdbcOptions.getPassword().ifPresent(password -> info.setProperty("password", password));
connection = driver.connect(jdbcOptions.getDbURL(), info);
if (connection == null) {
// Throw same exception as DriverManager.getConnection when no driver found to match
// caller expectation.
throw new SQLException(
"No suitable driver found for " + jdbcOptions.getDbURL(), "08001");
}
}
return connection;
}
@Override
public void closeConnection() {
if (connection != null) {
try {
connection.close();
} catch (SQLException e) {
LOG.warn("JDBC connection close failed.", e);
} finally {
connection = null;
}
}
}
@Override
public Connection reestablishConnection() throws SQLException, ClassNotFoundException {
closeConnection();
return getOrEstablishConnection();
}
}
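/*
 * A minimal sketch of the provider above. URL and driver values are illustrative assumptions:
 *
 *   JdbcConnectionProvider provider = new SimpleJdbcConnectionProvider(
 *           new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
 *                   .withUrl("jdbc:phoenix:zk-host:2181")
 *                   .withDriverName("org.apache.phoenix.jdbc.PhoenixDriver")
 *                   .build());
 *   Connection conn = provider.getOrEstablishConnection();
 *   // ... use the connection, then release it:
 *   provider.closeConnection();
 */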
......@@ -20,6 +20,7 @@ package org.apache.flink.connector.phoenix.internal.executor;
import org.apache.flink.annotation.Internal;
import org.apache.flink.connector.phoenix.JdbcStatementBuilder;
import org.apache.flink.connector.phoenix.table.PhoenixUpsertTableSink;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -41,7 +42,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
* and inserting otherwise. Used in Table API.
*
* @deprecated This has been replaced with {@link TableInsertOrUpdateStatementExecutor}, will remove
* this once {@link org.apache.flink.connector.phoenix.table.JdbcUpsertTableSink} is removed.
* this once {@link PhoenixUpsertTableSink} is removed.
*/
@Internal
public final class InsertOrUpdateJdbcExecutor<R, K, V> implements JdbcBatchStatementExecutor<R> {
......@@ -103,10 +104,8 @@ public final class InsertOrUpdateJdbcExecutor<R, K, V> implements JdbcBatchState
for (Map.Entry<K, V> entry : batch.entrySet()) {
processOneRowInBatch(entry.getKey(), entry.getValue());
}
//updateStatement.executeBatch();
//insertStatement.executeBatch();
//batch.clear();
conn.commit();
batch.clear();
}
}
......@@ -114,11 +113,8 @@ public final class InsertOrUpdateJdbcExecutor<R, K, V> implements JdbcBatchState
if (exist(pk)) {
updateSetter.accept(updateStatement, row);
updateStatement.executeUpdate();
//updateStatement.addBatch();
} else {
insertSetter.accept(insertStatement, row);
// insertStatement.addBatch();
insertStatement.executeUpdate();
}
}
......
......@@ -72,12 +72,11 @@ class KeyedBatchStatementExecutor<T, K> implements JdbcBatchStatementExecutor<T>
if (!batch.isEmpty()) {
for (K entry : batch) {
parameterSetter.accept(st, entry);
//st.addBatch();
st.executeUpdate();
}
//st.executeBatch();
//batch.clear();
LOG.info("connection commit datasize:" + batch.size());
conn.commit();
batch.clear();
}
}
......
......@@ -60,7 +60,6 @@ class SimpleBatchStatementExecutor<T, V> implements JdbcBatchStatementExecutor<T
@Override
public void addToBatch(T record) {
LOG.info("添加数据:" + record.toString());
batch.add(valueTransformer.apply(record));
}
......@@ -69,12 +68,9 @@ class SimpleBatchStatementExecutor<T, V> implements JdbcBatchStatementExecutor<T
if (!batch.isEmpty()) {
for (V r : batch) {
parameterSetter.accept(st, r);
//st.addBatch();
st.executeUpdate();
}
//st.executeBatch();
LOG.info("提交数据:" +batch.size() );
LOG.info("connection commit dataSize:" + batch.size());
connection.commit();
batch.clear();
}
......
......@@ -85,12 +85,10 @@ public final class TableInsertOrUpdateStatementExecutor
private void processOneRowInBatch(RowData pk, RowData row) throws SQLException {
if (exist(pk)) {
updateSetter.toExternal(row, updateStatement);
//updateStatement.addBatch();
updateStatement.executeBatch();
updateStatement.addBatch();
} else {
insertSetter.toExternal(row, insertStatement);
//insertStatement.addBatch();
insertStatement.executeBatch();
insertStatement.addBatch();
}
}
......@@ -103,9 +101,6 @@ public final class TableInsertOrUpdateStatementExecutor
@Override
public void executeBatch(Connection conn) throws SQLException {
//updateStatement.executeBatch();
//insertStatement.executeBatch();
conn.commit();
}
......
......@@ -19,13 +19,13 @@
package org.apache.flink.connector.phoenix.split;
import org.apache.flink.annotation.Experimental;
import org.apache.flink.connector.phoenix.JdbcInputFormat;
import org.apache.flink.connector.phoenix.PhoenixInputFormat;
import java.io.Serializable;
/**
* This splits generator actually does nothing but wrapping the query parameters computed by the
* user before creating the {@link JdbcInputFormat} instance.
* user before creating the {@link PhoenixInputFormat} instance.
*/
@Experimental
public class JdbcGenericParameterValuesProvider implements JdbcParameterValuesProvider {
......
......@@ -19,12 +19,12 @@
package org.apache.flink.connector.phoenix.split;
import org.apache.flink.annotation.Experimental;
import org.apache.flink.connector.phoenix.JdbcInputFormat;
import org.apache.flink.connector.phoenix.PhoenixInputFormat;
import java.io.Serializable;
/**
* This interface is used by the {@link JdbcInputFormat} to compute the list of parallel query to
* This interface is used by the {@link PhoenixInputFormat} to compute the list of parallel query to
* run (i.e. splits). Each query will be parameterized using a row of the matrix provided by each
* {@link JdbcParameterValuesProvider} implementation.
*/
......
......@@ -101,6 +101,15 @@ public interface FieldNamedPreparedStatement extends AutoCloseable {
*/
int[] executeBatch() throws SQLException;
/**
* Executes the current statement immediately instead of adding it to a batch
* (Phoenix-specific addition).
*
* @see PreparedStatement#executeUpdate()
*/
void executeUpdate() throws SQLException;
/**
* Sets the designated parameter to SQL <code>NULL</code>.
*
......
......@@ -51,7 +51,12 @@ public class FieldNamedPreparedStatementImpl implements FieldNamedPreparedStatem
@Override
public void addBatch() throws SQLException {
statement.addBatch();
statement.executeUpdate();
}
@Override
public void executeUpdate() throws SQLException {
statement.executeUpdate();
}
@Override
......
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.internal.JdbcBatchingOutputFormat;
import org.apache.flink.connector.phoenix.internal.connection.SimpleJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.connector.phoenix.internal.executor.TableBufferReducedStatementExecutor;
import org.apache.flink.connector.phoenix.internal.executor.TableBufferedStatementExecutor;
import org.apache.flink.connector.phoenix.internal.executor.TableInsertOrUpdateStatementExecutor;
import org.apache.flink.connector.phoenix.internal.executor.TableSimpleStatementExecutor;
import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import java.io.Serializable;
import java.util.Arrays;
import java.util.function.Function;
import static org.apache.flink.table.data.RowData.createFieldGetter;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** Builder for {@link JdbcBatchingOutputFormat} for Table/SQL. */
public class JdbcDynamicOutputFormatBuilder implements Serializable {
private static final long serialVersionUID = 1L;
private JdbcOptions jdbcOptions;
private JdbcExecutionOptions executionOptions;
private JdbcDmlOptions dmlOptions;
private TypeInformation<RowData> rowDataTypeInformation;
private DataType[] fieldDataTypes;
public JdbcDynamicOutputFormatBuilder() {}
public JdbcDynamicOutputFormatBuilder setJdbcOptions(JdbcOptions jdbcOptions) {
this.jdbcOptions = jdbcOptions;
return this;
}
public JdbcDynamicOutputFormatBuilder setJdbcExecutionOptions(
JdbcExecutionOptions executionOptions) {
this.executionOptions = executionOptions;
return this;
}
public JdbcDynamicOutputFormatBuilder setJdbcDmlOptions(JdbcDmlOptions dmlOptions) {
this.dmlOptions = dmlOptions;
return this;
}
public JdbcDynamicOutputFormatBuilder setRowDataTypeInfo(
TypeInformation<RowData> rowDataTypeInfo) {
this.rowDataTypeInformation = rowDataTypeInfo;
return this;
}
public JdbcDynamicOutputFormatBuilder setFieldDataTypes(DataType[] fieldDataTypes) {
this.fieldDataTypes = fieldDataTypes;
return this;
}
public JdbcBatchingOutputFormat<RowData, ?, ?> build() {
checkNotNull(jdbcOptions, "jdbc options can not be null");
checkNotNull(dmlOptions, "jdbc dml options can not be null");
checkNotNull(executionOptions, "jdbc execution options can not be null");
final LogicalType[] logicalTypes =
Arrays.stream(fieldDataTypes)
.map(DataType::getLogicalType)
.toArray(LogicalType[]::new);
if (dmlOptions.getKeyFields().isPresent() && dmlOptions.getKeyFields().get().length > 0) {
// upsert query
return new JdbcBatchingOutputFormat<>(
new SimpleJdbcConnectionProvider(jdbcOptions),
executionOptions,
ctx ->
createBufferReduceExecutor(
dmlOptions, ctx, rowDataTypeInformation, logicalTypes),
JdbcBatchingOutputFormat.RecordExtractor.identity());
} else {
// append only query
final String sql =
dmlOptions
.getDialect()
.getInsertIntoStatement(
dmlOptions.getTableName(), dmlOptions.getFieldNames());
return new JdbcBatchingOutputFormat<>(
new SimpleJdbcConnectionProvider(jdbcOptions),
executionOptions,
ctx ->
createSimpleBufferedExecutor(
ctx,
dmlOptions.getDialect(),
dmlOptions.getFieldNames(),
logicalTypes,
sql,
rowDataTypeInformation),
JdbcBatchingOutputFormat.RecordExtractor.identity());
}
}
private static JdbcBatchStatementExecutor<RowData> createBufferReduceExecutor(
JdbcDmlOptions opt,
RuntimeContext ctx,
TypeInformation<RowData> rowDataTypeInfo,
LogicalType[] fieldTypes) {
checkArgument(opt.getKeyFields().isPresent());
JdbcDialect dialect = opt.getDialect();
String tableName = opt.getTableName();
String[] pkNames = opt.getKeyFields().get();
int[] pkFields =
Arrays.stream(pkNames)
.mapToInt(Arrays.asList(opt.getFieldNames())::indexOf)
.toArray();
LogicalType[] pkTypes =
Arrays.stream(pkFields).mapToObj(f -> fieldTypes[f]).toArray(LogicalType[]::new);
final TypeSerializer<RowData> typeSerializer =
rowDataTypeInfo.createSerializer(ctx.getExecutionConfig());
final Function<RowData, RowData> valueTransform =
ctx.getExecutionConfig().isObjectReuseEnabled()
? typeSerializer::copy
: Function.identity();
return new TableBufferReducedStatementExecutor(
createUpsertRowExecutor(
dialect,
tableName,
opt.getFieldNames(),
fieldTypes,
pkFields,
pkNames,
pkTypes),
createDeleteExecutor(dialect, tableName, pkNames, pkTypes),
createRowKeyExtractor(fieldTypes, pkFields),
valueTransform);
}
private static JdbcBatchStatementExecutor<RowData> createSimpleBufferedExecutor(
RuntimeContext ctx,
JdbcDialect dialect,
String[] fieldNames,
LogicalType[] fieldTypes,
String sql,
TypeInformation<RowData> rowDataTypeInfo) {
final TypeSerializer<RowData> typeSerializer =
rowDataTypeInfo.createSerializer(ctx.getExecutionConfig());
return new TableBufferedStatementExecutor(
createSimpleRowExecutor(dialect, fieldNames, fieldTypes, sql),
ctx.getExecutionConfig().isObjectReuseEnabled()
? typeSerializer::copy
: Function.identity());
}
private static JdbcBatchStatementExecutor<RowData> createUpsertRowExecutor(
JdbcDialect dialect,
String tableName,
String[] fieldNames,
LogicalType[] fieldTypes,
int[] pkFields,
String[] pkNames,
LogicalType[] pkTypes) {
return dialect.getUpsertStatement(tableName, fieldNames, pkNames)
.map(sql -> createSimpleRowExecutor(dialect, fieldNames, fieldTypes, sql))
.orElseGet(
() ->
createInsertOrUpdateExecutor(
dialect,
tableName,
fieldNames,
fieldTypes,
pkFields,
pkNames,
pkTypes));
}
private static JdbcBatchStatementExecutor<RowData> createDeleteExecutor(
JdbcDialect dialect, String tableName, String[] pkNames, LogicalType[] pkTypes) {
String deleteSql = dialect.getDeleteStatement(tableName, pkNames);
return createSimpleRowExecutor(dialect, pkNames, pkTypes, deleteSql);
}
private static JdbcBatchStatementExecutor<RowData> createSimpleRowExecutor(
JdbcDialect dialect, String[] fieldNames, LogicalType[] fieldTypes, final String sql) {
final JdbcRowConverter rowConverter = dialect.getRowConverter(RowType.of(fieldTypes));
return new TableSimpleStatementExecutor(
connection ->
FieldNamedPreparedStatement.prepareStatement(connection, sql, fieldNames),
rowConverter);
}
private static JdbcBatchStatementExecutor<RowData> createInsertOrUpdateExecutor(
JdbcDialect dialect,
String tableName,
String[] fieldNames,
LogicalType[] fieldTypes,
int[] pkFields,
String[] pkNames,
LogicalType[] pkTypes) {
final String existStmt = dialect.getRowExistsStatement(tableName, pkNames);
final String insertStmt = dialect.getInsertIntoStatement(tableName, fieldNames);
final String updateStmt = dialect.getUpdateStatement(tableName, fieldNames, pkNames);
return new TableInsertOrUpdateStatementExecutor(
connection ->
FieldNamedPreparedStatement.prepareStatement(
connection, existStmt, pkNames),
connection ->
FieldNamedPreparedStatement.prepareStatement(
connection, insertStmt, fieldNames),
connection ->
FieldNamedPreparedStatement.prepareStatement(
connection, updateStmt, fieldNames),
dialect.getRowConverter(RowType.of(pkTypes)),
dialect.getRowConverter(RowType.of(fieldTypes)),
dialect.getRowConverter(RowType.of(fieldTypes)),
createRowKeyExtractor(fieldTypes, pkFields));
}
private static Function<RowData, RowData> createRowKeyExtractor(
LogicalType[] logicalTypes, int[] pkFields) {
final RowData.FieldGetter[] fieldGetters = new RowData.FieldGetter[pkFields.length];
for (int i = 0; i < pkFields.length; i++) {
fieldGetters[i] = createFieldGetter(logicalTypes[pkFields[i]], pkFields[i]);
}
return row -> getPrimaryKey(row, fieldGetters);
}
private static RowData getPrimaryKey(RowData row, RowData.FieldGetter[] fieldGetters) {
GenericRowData pkRow = new GenericRowData(fieldGetters.length);
for (int i = 0; i < fieldGetters.length; i++) {
pkRow.setField(i, fieldGetters[i].getFieldOrNull(row));
}
return pkRow;
}
}
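/*
 * A minimal sketch of the Table/SQL output-format builder above. The option objects and
 * type information are assumed to have been created elsewhere (e.g. in JdbcDynamicTableSink):
 *
 *   JdbcBatchingOutputFormat<RowData, ?, ?> outputFormat =
 *           new JdbcDynamicOutputFormatBuilder()
 *                   .setJdbcOptions(jdbcOptions)
 *                   .setJdbcDmlOptions(dmlOptions)
 *                   .setJdbcExecutionOptions(executionOptions)
 *                   .setRowDataTypeInfo(rowDataTypeInfo)
 *                   .setFieldDataTypes(fieldDataTypes)
 *                   .build();
 */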
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.dialect.JdbcDialects;
import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.util.Preconditions;
import java.time.Duration;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import static org.apache.flink.util.Preconditions.checkState;
/**
* Factory for creating configured instances of {@link JdbcDynamicTableSource} and {@link
* JdbcDynamicTableSink}.
*/
@Internal
public class JdbcDynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory {
public static final String IDENTIFIER = "jdbc";
public static final ConfigOption<String> URL =
ConfigOptions.key("url")
.stringType()
.noDefaultValue()
.withDescription("The JDBC database URL.");
public static final ConfigOption<String> TABLE_NAME =
ConfigOptions.key("table-name")
.stringType()
.noDefaultValue()
.withDescription("The JDBC table name.");
public static final ConfigOption<String> USERNAME =
ConfigOptions.key("username")
.stringType()
.noDefaultValue()
.withDescription("The JDBC user name.");
public static final ConfigOption<String> PASSWORD =
ConfigOptions.key("password")
.stringType()
.noDefaultValue()
.withDescription("The JDBC password.");
private static final ConfigOption<String> DRIVER =
ConfigOptions.key("driver")
.stringType()
.noDefaultValue()
.withDescription(
"The class name of the JDBC driver to use to connect to this URL. "
+ "If not set, it will automatically be derived from the URL.");
public static final ConfigOption<Duration> MAX_RETRY_TIMEOUT =
ConfigOptions.key("connection.max-retry-timeout")
.durationType()
.defaultValue(Duration.ofSeconds(60))
.withDescription("Maximum timeout between retries.");
// read config options
private static final ConfigOption<String> SCAN_PARTITION_COLUMN =
ConfigOptions.key("scan.partition.column")
.stringType()
.noDefaultValue()
.withDescription("The column name used for partitioning the input.");
private static final ConfigOption<Integer> SCAN_PARTITION_NUM =
ConfigOptions.key("scan.partition.num")
.intType()
.noDefaultValue()
.withDescription("The number of partitions.");
private static final ConfigOption<Long> SCAN_PARTITION_LOWER_BOUND =
ConfigOptions.key("scan.partition.lower-bound")
.longType()
.noDefaultValue()
.withDescription("The smallest value of the first partition.");
private static final ConfigOption<Long> SCAN_PARTITION_UPPER_BOUND =
ConfigOptions.key("scan.partition.upper-bound")
.longType()
.noDefaultValue()
.withDescription("The largest value of the last partition.");
private static final ConfigOption<Integer> SCAN_FETCH_SIZE =
ConfigOptions.key("scan.fetch-size")
.intType()
.defaultValue(0)
.withDescription(
"Gives the reader a hint as to the number of rows that should be fetched "
+ "from the database per round-trip when reading. "
+ "If the value is zero, this hint is ignored.");
private static final ConfigOption<Boolean> SCAN_AUTO_COMMIT =
ConfigOptions.key("scan.auto-commit")
.booleanType()
.defaultValue(true)
.withDescription("Sets whether the driver is in auto-commit mode.");
// look up config options
private static final ConfigOption<Long> LOOKUP_CACHE_MAX_ROWS =
ConfigOptions.key("lookup.cache.max-rows")
.longType()
.defaultValue(-1L)
.withDescription(
"The max number of rows of lookup cache, over this value, the oldest rows will "
+ "be eliminated. \"cache.max-rows\" and \"cache.ttl\" options must all be specified if any of them is "
+ "specified.");
private static final ConfigOption<Duration> LOOKUP_CACHE_TTL =
ConfigOptions.key("lookup.cache.ttl")
.durationType()
.defaultValue(Duration.ofSeconds(10))
.withDescription("The cache time to live.");
private static final ConfigOption<Integer> LOOKUP_MAX_RETRIES =
ConfigOptions.key("lookup.max-retries")
.intType()
.defaultValue(3)
.withDescription("The max retry times if lookup database failed.");
// write config options
private static final ConfigOption<Integer> SINK_BUFFER_FLUSH_MAX_ROWS =
ConfigOptions.key("sink.buffer-flush.max-rows")
.intType()
.defaultValue(100)
.withDescription(
"The flush max size (includes all append, upsert and delete records), over this number"
+ " of records, will flush data.");
private static final ConfigOption<Duration> SINK_BUFFER_FLUSH_INTERVAL =
ConfigOptions.key("sink.buffer-flush.interval")
.durationType()
.defaultValue(Duration.ofSeconds(1))
.withDescription(
"The flush interval mills, over this time, asynchronous threads will flush data.");
private static final ConfigOption<Integer> SINK_MAX_RETRIES =
ConfigOptions.key("sink.max-retries")
.intType()
.defaultValue(3)
.withDescription("The max retry times if writing records to database failed.");
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
final FactoryUtil.TableFactoryHelper helper =
FactoryUtil.createTableFactoryHelper(this, context);
final ReadableConfig config = helper.getOptions();
helper.validate();
validateConfigOptions(config);
JdbcOptions jdbcOptions = getJdbcOptions(config);
TableSchema physicalSchema =
TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
return new JdbcDynamicTableSink(
jdbcOptions,
getJdbcExecutionOptions(config),
getJdbcDmlOptions(jdbcOptions, physicalSchema),
physicalSchema);
}
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
final FactoryUtil.TableFactoryHelper helper =
FactoryUtil.createTableFactoryHelper(this, context);
final ReadableConfig config = helper.getOptions();
helper.validate();
validateConfigOptions(config);
TableSchema physicalSchema =
TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
return new JdbcDynamicTableSource(
getJdbcOptions(helper.getOptions()),
getJdbcReadOptions(helper.getOptions()),
getJdbcLookupOptions(helper.getOptions()),
physicalSchema);
}
private JdbcOptions getJdbcOptions(ReadableConfig readableConfig) {
final String url = readableConfig.get(URL);
final JdbcOptions.Builder builder =
JdbcOptions.builder()
.setDBUrl(url)
.setTableName(readableConfig.get(TABLE_NAME))
.setDialect(JdbcDialects.get(url).get())
.setParallelism(
readableConfig
.getOptional(FactoryUtil.SINK_PARALLELISM)
.orElse(null))
.setConnectionCheckTimeoutSeconds(
(int) readableConfig.get(MAX_RETRY_TIMEOUT).getSeconds());
readableConfig.getOptional(DRIVER).ifPresent(builder::setDriverName);
readableConfig.getOptional(USERNAME).ifPresent(builder::setUsername);
readableConfig.getOptional(PASSWORD).ifPresent(builder::setPassword);
return builder.build();
}
private JdbcReadOptions getJdbcReadOptions(ReadableConfig readableConfig) {
final Optional<String> partitionColumnName =
readableConfig.getOptional(SCAN_PARTITION_COLUMN);
final JdbcReadOptions.Builder builder = JdbcReadOptions.builder();
if (partitionColumnName.isPresent()) {
builder.setPartitionColumnName(partitionColumnName.get());
builder.setPartitionLowerBound(readableConfig.get(SCAN_PARTITION_LOWER_BOUND));
builder.setPartitionUpperBound(readableConfig.get(SCAN_PARTITION_UPPER_BOUND));
builder.setNumPartitions(readableConfig.get(SCAN_PARTITION_NUM));
}
readableConfig.getOptional(SCAN_FETCH_SIZE).ifPresent(builder::setFetchSize);
builder.setAutoCommit(readableConfig.get(SCAN_AUTO_COMMIT));
return builder.build();
}
private JdbcLookupOptions getJdbcLookupOptions(ReadableConfig readableConfig) {
return new JdbcLookupOptions(
readableConfig.get(LOOKUP_CACHE_MAX_ROWS),
readableConfig.get(LOOKUP_CACHE_TTL).toMillis(),
readableConfig.get(LOOKUP_MAX_RETRIES));
}
private JdbcExecutionOptions getJdbcExecutionOptions(ReadableConfig config) {
final JdbcExecutionOptions.Builder builder = new JdbcExecutionOptions.Builder();
builder.withBatchSize(config.get(SINK_BUFFER_FLUSH_MAX_ROWS));
builder.withBatchIntervalMs(config.get(SINK_BUFFER_FLUSH_INTERVAL).toMillis());
builder.withMaxRetries(config.get(SINK_MAX_RETRIES));
return builder.build();
}
private JdbcDmlOptions getJdbcDmlOptions(JdbcOptions jdbcOptions, TableSchema schema) {
String[] keyFields =
schema.getPrimaryKey()
.map(pk -> pk.getColumns().toArray(new String[0]))
.orElse(null);
return JdbcDmlOptions.builder()
.withTableName(jdbcOptions.getTableName())
.withDialect(jdbcOptions.getDialect())
.withFieldNames(schema.getFieldNames())
.withKeyFields(keyFields)
.build();
}
@Override
public String factoryIdentifier() {
return IDENTIFIER;
}
@Override
public Set<ConfigOption<?>> requiredOptions() {
Set<ConfigOption<?>> requiredOptions = new HashSet<>();
requiredOptions.add(URL);
requiredOptions.add(TABLE_NAME);
return requiredOptions;
}
@Override
public Set<ConfigOption<?>> optionalOptions() {
Set<ConfigOption<?>> optionalOptions = new HashSet<>();
optionalOptions.add(DRIVER);
optionalOptions.add(USERNAME);
optionalOptions.add(PASSWORD);
optionalOptions.add(SCAN_PARTITION_COLUMN);
optionalOptions.add(SCAN_PARTITION_LOWER_BOUND);
optionalOptions.add(SCAN_PARTITION_UPPER_BOUND);
optionalOptions.add(SCAN_PARTITION_NUM);
optionalOptions.add(SCAN_FETCH_SIZE);
optionalOptions.add(SCAN_AUTO_COMMIT);
optionalOptions.add(LOOKUP_CACHE_MAX_ROWS);
optionalOptions.add(LOOKUP_CACHE_TTL);
optionalOptions.add(LOOKUP_MAX_RETRIES);
optionalOptions.add(SINK_BUFFER_FLUSH_MAX_ROWS);
optionalOptions.add(SINK_BUFFER_FLUSH_INTERVAL);
optionalOptions.add(SINK_MAX_RETRIES);
optionalOptions.add(FactoryUtil.SINK_PARALLELISM);
optionalOptions.add(MAX_RETRY_TIMEOUT);
return optionalOptions;
}
private void validateConfigOptions(ReadableConfig config) {
String jdbcUrl = config.get(URL);
final Optional<JdbcDialect> dialect = JdbcDialects.get(jdbcUrl);
checkState(dialect.isPresent(), "Cannot handle such jdbc url: " + jdbcUrl);
checkAllOrNone(config, new ConfigOption[] {USERNAME, PASSWORD});
checkAllOrNone(
config,
new ConfigOption[] {
SCAN_PARTITION_COLUMN,
SCAN_PARTITION_NUM,
SCAN_PARTITION_LOWER_BOUND,
SCAN_PARTITION_UPPER_BOUND
});
if (config.getOptional(SCAN_PARTITION_LOWER_BOUND).isPresent()
&& config.getOptional(SCAN_PARTITION_UPPER_BOUND).isPresent()) {
long lowerBound = config.get(SCAN_PARTITION_LOWER_BOUND);
long upperBound = config.get(SCAN_PARTITION_UPPER_BOUND);
if (lowerBound > upperBound) {
throw new IllegalArgumentException(
String.format(
"'%s'='%s' must not be larger than '%s'='%s'.",
SCAN_PARTITION_LOWER_BOUND.key(),
lowerBound,
SCAN_PARTITION_UPPER_BOUND.key(),
upperBound));
}
}
checkAllOrNone(config, new ConfigOption[] {LOOKUP_CACHE_MAX_ROWS, LOOKUP_CACHE_TTL});
if (config.get(LOOKUP_MAX_RETRIES) < 0) {
throw new IllegalArgumentException(
String.format(
"The value of '%s' option shouldn't be negative, but is %s.",
LOOKUP_MAX_RETRIES.key(), config.get(LOOKUP_MAX_RETRIES)));
}
if (config.get(SINK_MAX_RETRIES) < 0) {
throw new IllegalArgumentException(
String.format(
"The value of '%s' option shouldn't be negative, but is %s.",
SINK_MAX_RETRIES.key(), config.get(SINK_MAX_RETRIES)));
}
if (config.get(MAX_RETRY_TIMEOUT).getSeconds() <= 0) {
throw new IllegalArgumentException(
String.format(
"The value of '%s' option must be in second granularity and shouldn't be smaller than 1 second, but is %s.",
MAX_RETRY_TIMEOUT.key(),
config.get(
ConfigOptions.key(MAX_RETRY_TIMEOUT.key())
.stringType()
.noDefaultValue())));
}
}
private void checkAllOrNone(ReadableConfig config, ConfigOption<?>[] configOptions) {
int presentCount = 0;
for (ConfigOption configOption : configOptions) {
if (config.getOptional(configOption).isPresent()) {
presentCount++;
}
}
String[] propertyNames =
Arrays.stream(configOptions).map(ConfigOption::key).toArray(String[]::new);
Preconditions.checkArgument(
configOptions.length == presentCount || presentCount == 0,
"Either all or none of the following options should be provided:\n"
+ String.join("\n", propertyNames));
}
}
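/*
 * Illustrative Flink SQL DDL exercising the options declared above. Table, URL, and driver
 * values are assumptions, not taken from this commit:
 *
 *   CREATE TABLE phoenix_sink (
 *     id BIGINT,
 *     name STRING,
 *     PRIMARY KEY (id) NOT ENFORCED
 *   ) WITH (
 *     'connector' = 'jdbc',
 *     'url' = 'jdbc:phoenix:zk-host:2181',
 *     'table-name' = 'MY_TABLE',
 *     'driver' = 'org.apache.phoenix.jdbc.PhoenixDriver',
 *     'sink.buffer-flush.max-rows' = '500',
 *     'sink.buffer-flush.interval' = '2s'
 *   );
 */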
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.internal.GenericJdbcSinkFunction;
import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.RowKind;
import java.util.Objects;
import static org.apache.flink.util.Preconditions.checkState;
/** A {@link DynamicTableSink} for JDBC. */
@Internal
public class JdbcDynamicTableSink implements DynamicTableSink {
private final JdbcOptions jdbcOptions;
private final JdbcExecutionOptions executionOptions;
private final JdbcDmlOptions dmlOptions;
private final TableSchema tableSchema;
private final String dialectName;
public JdbcDynamicTableSink(
JdbcOptions jdbcOptions,
JdbcExecutionOptions executionOptions,
JdbcDmlOptions dmlOptions,
TableSchema tableSchema) {
this.jdbcOptions = jdbcOptions;
this.executionOptions = executionOptions;
this.dmlOptions = dmlOptions;
this.tableSchema = tableSchema;
this.dialectName = dmlOptions.getDialect().dialectName();
}
@Override
public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
validatePrimaryKey(requestedMode);
return ChangelogMode.newBuilder()
.addContainedKind(RowKind.INSERT)
.addContainedKind(RowKind.DELETE)
.addContainedKind(RowKind.UPDATE_AFTER)
.build();
}
private void validatePrimaryKey(ChangelogMode requestedMode) {
checkState(
ChangelogMode.insertOnly().equals(requestedMode)
|| dmlOptions.getKeyFields().isPresent(),
"please declare primary key for sink table when query contains update/delete record.");
}
@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
final TypeInformation<RowData> rowDataTypeInformation =
context.createTypeInformation(tableSchema.toRowDataType());
final JdbcDynamicOutputFormatBuilder builder = new JdbcDynamicOutputFormatBuilder();
builder.setJdbcOptions(jdbcOptions);
builder.setJdbcDmlOptions(dmlOptions);
builder.setJdbcExecutionOptions(executionOptions);
builder.setRowDataTypeInfo(rowDataTypeInformation);
builder.setFieldDataTypes(tableSchema.getFieldDataTypes());
return SinkFunctionProvider.of(
new GenericJdbcSinkFunction<>(builder.build()), jdbcOptions.getParallelism());
}
@Override
public DynamicTableSink copy() {
return new JdbcDynamicTableSink(jdbcOptions, executionOptions, dmlOptions, tableSchema);
}
@Override
public String asSummaryString() {
return "JDBC:" + dialectName;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof JdbcDynamicTableSink)) {
return false;
}
JdbcDynamicTableSink that = (JdbcDynamicTableSink) o;
return Objects.equals(jdbcOptions, that.jdbcOptions)
&& Objects.equals(executionOptions, that.executionOptions)
&& Objects.equals(dmlOptions, that.dmlOptions)
&& Objects.equals(tableSchema, that.tableSchema)
&& Objects.equals(dialectName, that.dialectName);
}
@Override
public int hashCode() {
return Objects.hash(jdbcOptions, executionOptions, dmlOptions, tableSchema, dialectName);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions;
import org.apache.flink.connector.phoenix.split.JdbcNumericBetweenParametersProvider;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.InputFormatProvider;
import org.apache.flink.table.connector.source.LookupTableSource;
import org.apache.flink.table.connector.source.ScanTableSource;
import org.apache.flink.table.connector.source.TableFunctionProvider;
import org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown;
import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.util.Preconditions;
import java.util.Objects;
/** A {@link DynamicTableSource} for JDBC. */
@Internal
public class JdbcDynamicTableSource
implements ScanTableSource,
LookupTableSource,
SupportsProjectionPushDown,
SupportsLimitPushDown {
private final JdbcOptions options;
private final JdbcReadOptions readOptions;
private final JdbcLookupOptions lookupOptions;
private TableSchema physicalSchema;
private final String dialectName;
private long limit = -1;
public JdbcDynamicTableSource(
JdbcOptions options,
JdbcReadOptions readOptions,
JdbcLookupOptions lookupOptions,
TableSchema physicalSchema) {
this.options = options;
this.readOptions = readOptions;
this.lookupOptions = lookupOptions;
this.physicalSchema = physicalSchema;
this.dialectName = options.getDialect().dialectName();
}
@Override
public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) {
        // JDBC only supports non-nested lookup keys
String[] keyNames = new String[context.getKeys().length];
for (int i = 0; i < keyNames.length; i++) {
int[] innerKeyArr = context.getKeys()[i];
Preconditions.checkArgument(
                    innerKeyArr.length == 1, "JDBC only supports non-nested lookup keys");
keyNames[i] = physicalSchema.getFieldNames()[innerKeyArr[0]];
}
final RowType rowType = (RowType) physicalSchema.toRowDataType().getLogicalType();
return TableFunctionProvider.of(
new JdbcRowDataLookupFunction(
options,
lookupOptions,
physicalSchema.getFieldNames(),
physicalSchema.getFieldDataTypes(),
keyNames,
rowType));
}
@Override
public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
final JdbcRowDataInputFormat.Builder builder =
JdbcRowDataInputFormat.builder()
.setDrivername(options.getDriverName())
.setDBUrl(options.getDbURL())
.setUsername(options.getUsername().orElse(null))
.setPassword(options.getPassword().orElse(null))
.setAutoCommit(readOptions.getAutoCommit());
if (readOptions.getFetchSize() != 0) {
builder.setFetchSize(readOptions.getFetchSize());
}
final JdbcDialect dialect = options.getDialect();
String query =
dialect.getSelectFromStatement(
options.getTableName(), physicalSchema.getFieldNames(), new String[0]);
if (readOptions.getPartitionColumnName().isPresent()) {
long lowerBound = readOptions.getPartitionLowerBound().get();
long upperBound = readOptions.getPartitionUpperBound().get();
int numPartitions = readOptions.getNumPartitions().get();
builder.setParametersProvider(
new JdbcNumericBetweenParametersProvider(lowerBound, upperBound)
.ofBatchNum(numPartitions));
query +=
" WHERE "
+ dialect.quoteIdentifier(readOptions.getPartitionColumnName().get())
+ " BETWEEN ? AND ?";
}
if (limit >= 0) {
query = String.format("%s %s", query, dialect.getLimitClause(limit));
}
builder.setQuery(query);
final RowType rowType = (RowType) physicalSchema.toRowDataType().getLogicalType();
builder.setRowConverter(dialect.getRowConverter(rowType));
builder.setRowDataTypeInfo(
runtimeProviderContext.createTypeInformation(physicalSchema.toRowDataType()));
return InputFormatProvider.of(builder.build());
}
@Override
public ChangelogMode getChangelogMode() {
return ChangelogMode.insertOnly();
}
@Override
public boolean supportsNestedProjection() {
// JDBC doesn't support nested projection
return false;
}
@Override
public void applyProjection(int[][] projectedFields) {
this.physicalSchema = TableSchemaUtils.projectSchema(physicalSchema, projectedFields);
}
@Override
public DynamicTableSource copy() {
return new JdbcDynamicTableSource(options, readOptions, lookupOptions, physicalSchema);
}
@Override
public String asSummaryString() {
return "JDBC:" + dialectName;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof JdbcDynamicTableSource)) {
return false;
}
JdbcDynamicTableSource that = (JdbcDynamicTableSource) o;
return Objects.equals(options, that.options)
&& Objects.equals(readOptions, that.readOptions)
&& Objects.equals(lookupOptions, that.lookupOptions)
&& Objects.equals(physicalSchema, that.physicalSchema)
&& Objects.equals(dialectName, that.dialectName)
&& Objects.equals(limit, that.limit);
}
@Override
public int hashCode() {
return Objects.hash(
options, readOptions, lookupOptions, physicalSchema, dialectName, limit);
}
@Override
public void applyLimit(long limit) {
this.limit = limit;
}
}
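/*
 * A minimal sketch (not part of the connector) of how the partitioned scan above derives its
 * splits, assuming the parameters provider behaves as in the upstream Flink JDBC connector: the
 * configured lower/upper bounds are turned into one (lower, upper) pair per partition, and each
 * pair is bound to the " BETWEEN ? AND ?" predicate appended to the scan query, one pair per input
 * split. The bounds and partition count below are illustrative values.
 */
import org.apache.flink.connector.phoenix.split.JdbcNumericBetweenParametersProvider;

import java.util.Arrays;

class PartitionedScanSketch {
    public static void main(String[] args) {
        Object[][] splits =
                new JdbcNumericBetweenParametersProvider(0, 99)
                        .ofBatchNum(4)
                        .getParameterValues();
        // Expected to yield 4 pairs, e.g. [0, 24], [25, 49], [50, 74], [75, 99]; each pair fills
        // the two placeholders of "... WHERE <partition-column> BETWEEN ? AND ?".
        for (Object[] bounds : splits) {
            System.out.println(Arrays.toString(bounds));
        }
    }
}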
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.io.DefaultInputSplitAssigner;
import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.api.common.io.RichInputFormat;
import org.apache.flink.api.common.io.statistics.BaseStatistics;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.phoenix.JdbcConnectionOptions;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.SimpleJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.split.JdbcParameterValuesProvider;
import org.apache.flink.core.io.GenericInputSplit;
import org.apache.flink.core.io.InputSplit;
import org.apache.flink.core.io.InputSplitAssigner;
import org.apache.flink.table.data.RowData;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.Array;
import java.sql.Connection;
import java.sql.Date;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Arrays;
/** InputFormat for {@link JdbcDynamicTableSource}. */
@Internal
public class JdbcRowDataInputFormat extends RichInputFormat<RowData, InputSplit>
implements ResultTypeQueryable<RowData> {
private static final long serialVersionUID = 2L;
private static final Logger LOG = LoggerFactory.getLogger(JdbcRowDataInputFormat.class);
private JdbcConnectionProvider connectionProvider;
private int fetchSize;
private Boolean autoCommit;
private Object[][] parameterValues;
private String queryTemplate;
private int resultSetType;
private int resultSetConcurrency;
private JdbcRowConverter rowConverter;
private TypeInformation<RowData> rowDataTypeInfo;
private transient PreparedStatement statement;
private transient ResultSet resultSet;
private transient boolean hasNext;
private JdbcRowDataInputFormat(
JdbcConnectionProvider connectionProvider,
int fetchSize,
Boolean autoCommit,
Object[][] parameterValues,
String queryTemplate,
int resultSetType,
int resultSetConcurrency,
JdbcRowConverter rowConverter,
TypeInformation<RowData> rowDataTypeInfo) {
this.connectionProvider = connectionProvider;
this.fetchSize = fetchSize;
this.autoCommit = autoCommit;
this.parameterValues = parameterValues;
this.queryTemplate = queryTemplate;
this.resultSetType = resultSetType;
this.resultSetConcurrency = resultSetConcurrency;
this.rowConverter = rowConverter;
this.rowDataTypeInfo = rowDataTypeInfo;
}
@Override
public void configure(Configuration parameters) {
// do nothing here
}
@Override
public void openInputFormat() {
// called once per inputFormat (on open)
try {
Connection dbConn = connectionProvider.getOrEstablishConnection();
// set autoCommit mode only if it was explicitly configured.
// keep connection default otherwise.
if (autoCommit != null) {
dbConn.setAutoCommit(autoCommit);
}
statement = dbConn.prepareStatement(queryTemplate, resultSetType, resultSetConcurrency);
if (fetchSize == Integer.MIN_VALUE || fetchSize > 0) {
statement.setFetchSize(fetchSize);
}
} catch (SQLException se) {
throw new IllegalArgumentException("open() failed." + se.getMessage(), se);
} catch (ClassNotFoundException cnfe) {
throw new IllegalArgumentException(
"JDBC-Class not found. - " + cnfe.getMessage(), cnfe);
}
}
@Override
public void closeInputFormat() {
// called once per inputFormat (on close)
try {
if (statement != null) {
statement.close();
}
} catch (SQLException se) {
LOG.info("Inputformat Statement couldn't be closed - " + se.getMessage());
} finally {
statement = null;
}
connectionProvider.closeConnection();
parameterValues = null;
}
/**
* Connects to the source database and executes the query in a <b>parallel fashion</b> if this
* {@link InputFormat} is built using a parameterized query (i.e. using a {@link
* PreparedStatement}) and a proper {@link JdbcParameterValuesProvider}, in a <b>non-parallel
* fashion</b> otherwise.
*
* @param inputSplit which is ignored if this InputFormat is executed as a non-parallel source,
* a "hook" to the query parameters otherwise (using its <i>splitNumber</i>)
* @throws IOException if there's an error during the execution of the query
*/
@Override
public void open(InputSplit inputSplit) throws IOException {
try {
if (inputSplit != null && parameterValues != null) {
for (int i = 0; i < parameterValues[inputSplit.getSplitNumber()].length; i++) {
Object param = parameterValues[inputSplit.getSplitNumber()][i];
if (param instanceof String) {
statement.setString(i + 1, (String) param);
} else if (param instanceof Long) {
statement.setLong(i + 1, (Long) param);
} else if (param instanceof Integer) {
statement.setInt(i + 1, (Integer) param);
} else if (param instanceof Double) {
statement.setDouble(i + 1, (Double) param);
} else if (param instanceof Boolean) {
statement.setBoolean(i + 1, (Boolean) param);
} else if (param instanceof Float) {
statement.setFloat(i + 1, (Float) param);
} else if (param instanceof BigDecimal) {
statement.setBigDecimal(i + 1, (BigDecimal) param);
} else if (param instanceof Byte) {
statement.setByte(i + 1, (Byte) param);
} else if (param instanceof Short) {
statement.setShort(i + 1, (Short) param);
} else if (param instanceof Date) {
statement.setDate(i + 1, (Date) param);
} else if (param instanceof Time) {
statement.setTime(i + 1, (Time) param);
} else if (param instanceof Timestamp) {
statement.setTimestamp(i + 1, (Timestamp) param);
} else if (param instanceof Array) {
statement.setArray(i + 1, (Array) param);
} else {
// extends with other types if needed
throw new IllegalArgumentException(
"open() failed. Parameter "
+ i
+ " of type "
+ param.getClass()
+ " is not handled (yet).");
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(
String.format(
"Executing '%s' with parameters %s",
queryTemplate,
Arrays.deepToString(
parameterValues[inputSplit.getSplitNumber()])));
}
}
resultSet = statement.executeQuery();
hasNext = resultSet.next();
} catch (SQLException se) {
throw new IllegalArgumentException("open() failed." + se.getMessage(), se);
}
}
/**
* Closes all resources used.
*
* @throws IOException Indicates that a resource could not be closed.
*/
@Override
public void close() throws IOException {
if (resultSet == null) {
return;
}
try {
resultSet.close();
} catch (SQLException se) {
LOG.info("Inputformat ResultSet couldn't be closed - " + se.getMessage());
}
}
@Override
public TypeInformation<RowData> getProducedType() {
return rowDataTypeInfo;
}
/**
* Checks whether all data has been read.
*
     * @return boolean value indicating whether all data has been read.
* @throws IOException
*/
@Override
public boolean reachedEnd() throws IOException {
return !hasNext;
}
/**
     * Converts the next resultSet row into a {@link RowData}.
     *
     * @param reuse row to be reused (not used by this implementation).
     * @return the next {@link RowData}, or {@code null} if all data has been read.
* @throws IOException
*/
@Override
public RowData nextRecord(RowData reuse) throws IOException {
try {
if (!hasNext) {
return null;
}
RowData row = rowConverter.toInternal(resultSet);
// update hasNext after we've read the record
hasNext = resultSet.next();
return row;
} catch (SQLException se) {
throw new IOException("Couldn't read data - " + se.getMessage(), se);
} catch (NullPointerException npe) {
throw new IOException("Couldn't access resultSet", npe);
}
}
@Override
public BaseStatistics getStatistics(BaseStatistics cachedStatistics) throws IOException {
return cachedStatistics;
}
@Override
public InputSplit[] createInputSplits(int minNumSplits) throws IOException {
if (parameterValues == null) {
return new GenericInputSplit[] {new GenericInputSplit(0, 1)};
}
GenericInputSplit[] ret = new GenericInputSplit[parameterValues.length];
for (int i = 0; i < ret.length; i++) {
ret[i] = new GenericInputSplit(i, ret.length);
}
return ret;
}
@Override
public InputSplitAssigner getInputSplitAssigner(InputSplit[] inputSplits) {
return new DefaultInputSplitAssigner(inputSplits);
}
/**
     * A builder used to set parameters to the input format's configuration in a fluent way.
*
* @return builder
*/
public static Builder builder() {
return new Builder();
}
/** Builder for {@link JdbcRowDataInputFormat}. */
public static class Builder {
private JdbcConnectionOptions.JdbcConnectionOptionsBuilder connOptionsBuilder;
private int fetchSize;
private Boolean autoCommit;
private Object[][] parameterValues;
private String queryTemplate;
private JdbcRowConverter rowConverter;
private TypeInformation<RowData> rowDataTypeInfo;
private int resultSetType = ResultSet.TYPE_FORWARD_ONLY;
private int resultSetConcurrency = ResultSet.CONCUR_READ_ONLY;
public Builder() {
this.connOptionsBuilder = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder();
}
public Builder setDrivername(String drivername) {
this.connOptionsBuilder.withDriverName(drivername);
return this;
}
public Builder setDBUrl(String dbURL) {
this.connOptionsBuilder.withUrl(dbURL);
return this;
}
public Builder setUsername(String username) {
this.connOptionsBuilder.withUsername(username);
return this;
}
public Builder setPassword(String password) {
this.connOptionsBuilder.withPassword(password);
return this;
}
public Builder setQuery(String query) {
this.queryTemplate = query;
return this;
}
public Builder setParametersProvider(JdbcParameterValuesProvider parameterValuesProvider) {
this.parameterValues = parameterValuesProvider.getParameterValues();
return this;
}
public Builder setRowDataTypeInfo(TypeInformation<RowData> rowDataTypeInfo) {
this.rowDataTypeInfo = rowDataTypeInfo;
return this;
}
public Builder setRowConverter(JdbcRowConverter rowConverter) {
this.rowConverter = rowConverter;
return this;
}
public Builder setFetchSize(int fetchSize) {
Preconditions.checkArgument(
fetchSize == Integer.MIN_VALUE || fetchSize > 0,
"Illegal value %s for fetchSize, has to be positive or Integer.MIN_VALUE.",
fetchSize);
this.fetchSize = fetchSize;
return this;
}
public Builder setAutoCommit(boolean autoCommit) {
this.autoCommit = autoCommit;
return this;
}
public Builder setResultSetType(int resultSetType) {
this.resultSetType = resultSetType;
return this;
}
public Builder setResultSetConcurrency(int resultSetConcurrency) {
this.resultSetConcurrency = resultSetConcurrency;
return this;
}
public JdbcRowDataInputFormat build() {
if (this.queryTemplate == null) {
throw new NullPointerException("No query supplied");
}
if (this.rowConverter == null) {
throw new NullPointerException("No row converter supplied");
}
if (this.parameterValues == null) {
LOG.debug("No input splitting configured (data will be read with parallelism 1).");
}
return new JdbcRowDataInputFormat(
new SimpleJdbcConnectionProvider(connOptionsBuilder.build()),
this.fetchSize,
this.autoCommit,
this.parameterValues,
this.queryTemplate,
this.resultSetType,
this.resultSetConcurrency,
this.rowConverter,
this.rowDataTypeInfo);
}
}
}
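/*
 * A minimal sketch (not part of the connector) of assembling the input format above by hand. The
 * URL, driver, query and row type are illustrative assumptions, and the dialect lookup via
 * JdbcDialects.get(url) is assumed to resolve a Phoenix dialect in this fork, mirroring the
 * upstream Flink JDBC connector; in the table source the row converter and type information are
 * derived from the dialect and the physical schema instead.
 */
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.dialect.JdbcDialects;
import org.apache.flink.connector.phoenix.table.JdbcRowDataInputFormat;
import org.apache.flink.table.runtime.typeutils.InternalTypeInfo;
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;

class RowDataInputFormatSketch {
    public static void main(String[] args) {
        String url = "jdbc:phoenix:zk-host:2181"; // illustrative URL
        RowType rowType = RowType.of(new BigIntType(), new VarCharType(VarCharType.MAX_LENGTH));
        JdbcDialect dialect =
                JdbcDialects.get(url)
                        .orElseThrow(() -> new IllegalStateException("no dialect for " + url));
        JdbcRowDataInputFormat format =
                JdbcRowDataInputFormat.builder()
                        .setDrivername("org.apache.phoenix.jdbc.PhoenixDriver")
                        .setDBUrl(url)
                        .setQuery("SELECT ID, NAME FROM MY_TABLE") // illustrative query
                        .setFetchSize(1000)
                        .setRowConverter(dialect.getRowConverter(rowType))
                        .setRowDataTypeInfo(InternalTypeInfo.of(rowType))
                        .build();
        // The format is then wrapped via InputFormatProvider.of(format), as in
        // JdbcDynamicTableSource#getScanRuntimeProvider above.
        System.out.println(format.getProducedType());
    }
}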
......@@ -8,11 +8,12 @@ import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions;
import org.apache.flink.connector.phoenix.split.JdbcNumericBetweenParametersProvider;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.InputFormatProvider;
import org.apache.flink.table.connector.source.ScanTableSource;
import org.apache.flink.table.connector.source.*;
import org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown;
import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.utils.TableSchemaUtils;
import org.apache.flink.util.Preconditions;
import java.util.Objects;
......@@ -22,7 +23,8 @@ import java.util.Objects;
* @author gy
* @since 2022/3/17 10:40
**/
public class PhoenixDynamicTableSource implements ScanTableSource {
public class PhoenixDynamicTableSource implements ScanTableSource, LookupTableSource, SupportsProjectionPushDown,
SupportsLimitPushDown {
private final PhoenixJdbcOptions options;
private final JdbcReadOptions readOptions;
......@@ -40,6 +42,28 @@ public class PhoenixDynamicTableSource implements ScanTableSource {
}
@Override
public LookupTableSource.LookupRuntimeProvider getLookupRuntimeProvider(LookupTableSource.LookupContext context) {
        // JDBC only supports non-nested lookup keys
String[] keyNames = new String[context.getKeys().length];
for (int i = 0; i < keyNames.length; i++) {
int[] innerKeyArr = context.getKeys()[i];
Preconditions.checkArgument(
                    innerKeyArr.length == 1, "JDBC only supports non-nested lookup keys");
keyNames[i] = physicalSchema.getFieldNames()[innerKeyArr[0]];
}
final RowType rowType = (RowType) physicalSchema.toRowDataType().getLogicalType();
return TableFunctionProvider.of(
new PhoenixRowDataLookupFunction(
options,
lookupOptions,
physicalSchema.getFieldNames(),
physicalSchema.getFieldDataTypes(),
keyNames,
rowType));
}
@Override
public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
PhoenixJdbcRowDataInputFormat.Builder builder = PhoenixJdbcRowDataInputFormat.builder()
......@@ -82,11 +106,11 @@ public class PhoenixDynamicTableSource implements ScanTableSource {
public ChangelogMode getChangelogMode() {
return ChangelogMode.insertOnly();
}
@Override
public boolean supportsNestedProjection() {
return false;
}
@Override
public void applyProjection(int[][] projectedFields) {
this.physicalSchema = TableSchemaUtils.projectSchema(this.physicalSchema, projectedFields);
}
......
......@@ -22,19 +22,17 @@ import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.SimpleJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl;
import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil;
import org.apache.flink.connector.phoenix.utils.JdbcUtils;
import org.apache.flink.shaded.guava18.com.google.common.cache.Cache;
import org.apache.flink.shaded.guava18.com.google.common.cache.CacheBuilder;
import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.types.Row;
import org.apache.flink.shaded.guava18.com.google.common.cache.Cache;
import org.apache.flink.shaded.guava18.com.google.common.cache.CacheBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -63,9 +61,9 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 * <p>Supports caching results to avoid frequently accessing the remote database. 1. A cacheMaxSize
 * of -1 means the cache is not used. 2. For real-time data, you need to set a TTL on the cache.
*/
public class JdbcLookupFunction extends TableFunction<Row> {
public class PhoenixLookupFunction extends TableFunction<Row> {
private static final Logger LOG = LoggerFactory.getLogger(JdbcLookupFunction.class);
private static final Logger LOG = LoggerFactory.getLogger(PhoenixLookupFunction.class);
private static final long serialVersionUID = 2L;
private final String query;
......@@ -83,13 +81,13 @@ public class JdbcLookupFunction extends TableFunction<Row> {
private transient PreparedStatement statement;
private transient Cache<Row, List<Row>> cache;
public JdbcLookupFunction(
public PhoenixLookupFunction(
JdbcOptions options,
JdbcLookupOptions lookupOptions,
String[] fieldNames,
TypeInformation[] fieldTypes,
String[] keyNames) {
this.connectionProvider = new SimpleJdbcConnectionProvider(options);
this.connectionProvider = new PhoneixJdbcConnectionProvider(options);
this.fieldNames = fieldNames;
this.fieldTypes = fieldTypes;
this.keyNames = keyNames;
......@@ -255,7 +253,7 @@ public class JdbcLookupFunction extends TableFunction<Row> {
return keyTypes;
}
/** Builder for a {@link JdbcLookupFunction}. */
/** Builder for a {@link PhoenixLookupFunction}. */
public static class Builder {
private JdbcOptions options;
private JdbcLookupOptions lookupOptions;
......@@ -298,7 +296,7 @@ public class JdbcLookupFunction extends TableFunction<Row> {
*
* @return Configured JdbcLookupFunction
*/
public JdbcLookupFunction build() {
public PhoenixLookupFunction build() {
checkNotNull(options, "No JdbcOptions supplied.");
if (lookupOptions == null) {
lookupOptions = JdbcLookupOptions.builder().build();
......@@ -307,7 +305,7 @@ public class JdbcLookupFunction extends TableFunction<Row> {
checkNotNull(fieldTypes, "No fieldTypes supplied.");
checkNotNull(keyNames, "No keyNames supplied.");
return new JdbcLookupFunction(options, lookupOptions, fieldNames, fieldTypes, keyNames);
return new PhoenixLookupFunction(options, lookupOptions, fieldNames, fieldTypes, keyNames);
}
}
}
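/*
 * A minimal sketch (not part of the connector) of the cache that the lookup function above
 * creates when cacheMaxSize != -1, using the same shaded Guava API it imports. The size and TTL
 * values are illustrative stand-ins for JdbcLookupOptions' cacheMaxSize and cacheExpireMs.
 */
import org.apache.flink.shaded.guava18.com.google.common.cache.Cache;
import org.apache.flink.shaded.guava18.com.google.common.cache.CacheBuilder;
import org.apache.flink.types.Row;

import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;

class LookupCacheSketch {
    public static void main(String[] args) {
        long cacheMaxSize = 10_000L; // -1 disables the cache entirely
        long cacheExpireMs = 60_000L; // TTL, needed so real-time (changing) data is re-read

        Cache<Row, List<Row>> cache =
                CacheBuilder.newBuilder()
                        .expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS)
                        .maximumSize(cacheMaxSize)
                        .build();
        cache.put(Row.of(1L), Collections.singletonList(Row.of(1L, "value")));
        System.out.println(cache.getIfPresent(Row.of(1L)));
    }
}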
......@@ -23,11 +23,13 @@ import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.dialect.JdbcDialects;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.SimpleJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions;
import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement;
import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter;
import org.apache.flink.shaded.guava18.com.google.common.cache.Cache;
import org.apache.flink.shaded.guava18.com.google.common.cache.CacheBuilder;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.functions.FunctionContext;
......@@ -35,10 +37,6 @@ import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.shaded.guava18.com.google.common.cache.Cache;
import org.apache.flink.shaded.guava18.com.google.common.cache.CacheBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -56,9 +54,9 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
/** A lookup function for {@link JdbcDynamicTableSource}. */
@Internal
public class JdbcRowDataLookupFunction extends TableFunction<RowData> {
public class PhoenixRowDataLookupFunction extends TableFunction<RowData> {
private static final Logger LOG = LoggerFactory.getLogger(JdbcRowDataLookupFunction.class);
private static final Logger LOG = LoggerFactory.getLogger(PhoenixRowDataLookupFunction.class);
private static final long serialVersionUID = 2L;
private final String query;
......@@ -75,8 +73,8 @@ public class JdbcRowDataLookupFunction extends TableFunction<RowData> {
private transient FieldNamedPreparedStatement statement;
private transient Cache<RowData, List<RowData>> cache;
public JdbcRowDataLookupFunction(
JdbcOptions options,
public PhoenixRowDataLookupFunction(
PhoenixJdbcOptions options,
JdbcLookupOptions lookupOptions,
String[] fieldNames,
DataType[] fieldTypes,
......@@ -86,7 +84,7 @@ public class JdbcRowDataLookupFunction extends TableFunction<RowData> {
checkNotNull(fieldNames, "No fieldNames supplied.");
checkNotNull(fieldTypes, "No fieldTypes supplied.");
checkNotNull(keyNames, "No keyNames supplied.");
this.connectionProvider = new SimpleJdbcConnectionProvider(options);
        this.connectionProvider =
                new PhoneixJdbcConnectionProvider(
                        options,
                        options.getNamespaceMappingEnabled(),
                        options.getMapSystemTablesToNamespace());
this.keyNames = keyNames;
List<String> nameList = Arrays.asList(fieldNames);
this.keyTypes =
......
......@@ -19,7 +19,7 @@
package org.apache.flink.connector.phoenix.table;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.connector.phoenix.JdbcInputFormat;
import org.apache.flink.connector.phoenix.PhoenixInputFormat;
import org.apache.flink.connector.phoenix.dialect.JdbcDialect;
import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
......@@ -48,7 +48,7 @@ import static org.apache.flink.table.types.utils.TypeConversions.fromDataTypeToL
import static org.apache.flink.util.Preconditions.checkNotNull;
/** {@link TableSource} for JDBC. */
public class JdbcTableSource
public class PhoenixTableSource
implements StreamTableSource<Row>, ProjectableTableSource<Row>, LookupableTableSource<Row> {
private final JdbcOptions options;
......@@ -60,7 +60,7 @@ public class JdbcTableSource
private final int[] selectFields;
private final DataType producedDataType;
private JdbcTableSource(
private PhoenixTableSource(
JdbcOptions options,
JdbcReadOptions readOptions,
JdbcLookupOptions lookupOptions,
......@@ -68,7 +68,7 @@ public class JdbcTableSource
this(options, readOptions, lookupOptions, schema, null);
}
private JdbcTableSource(
private PhoenixTableSource(
JdbcOptions options,
JdbcReadOptions readOptions,
JdbcLookupOptions lookupOptions,
......@@ -112,7 +112,7 @@ public class JdbcTableSource
@Override
public TableFunction<Row> getLookupFunction(String[] lookupKeys) {
final RowTypeInfo rowTypeInfo = (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType);
return JdbcLookupFunction.builder()
return PhoenixLookupFunction.builder()
.setOptions(options)
.setLookupOptions(lookupOptions)
.setFieldTypes(rowTypeInfo.getFieldTypes())
......@@ -128,7 +128,7 @@ public class JdbcTableSource
@Override
public TableSource<Row> projectFields(int[] fields) {
return new JdbcTableSource(options, readOptions, lookupOptions, schema, fields);
return new PhoenixTableSource(options, readOptions, lookupOptions, schema, fields);
}
@Override
......@@ -156,10 +156,10 @@ public class JdbcTableSource
return new Builder();
}
private JdbcInputFormat getInputFormat() {
private PhoenixInputFormat getInputFormat() {
final RowTypeInfo rowTypeInfo = (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType);
JdbcInputFormat.JdbcInputFormatBuilder builder =
JdbcInputFormat.buildJdbcInputFormat()
PhoenixInputFormat.PhoenixInputFormatBuilder builder =
PhoenixInputFormat.buildJdbcInputFormat()
.setDrivername(options.getDriverName())
.setDBUrl(options.getDbURL())
.setRowTypeInfo(
......@@ -210,8 +210,8 @@ public class JdbcTableSource
@Override
public boolean equals(Object o) {
if (o instanceof JdbcTableSource) {
JdbcTableSource source = (JdbcTableSource) o;
if (o instanceof PhoenixTableSource) {
PhoenixTableSource source = (PhoenixTableSource) o;
return Objects.equals(options, source.options)
&& Objects.equals(readOptions, source.readOptions)
&& Objects.equals(lookupOptions, source.lookupOptions)
......@@ -222,7 +222,7 @@ public class JdbcTableSource
}
}
/** Builder for a {@link JdbcTableSource}. */
/** Builder for a {@link PhoenixTableSource}. */
public static class Builder {
private JdbcOptions options;
......@@ -265,7 +265,7 @@ public class JdbcTableSource
*
* @return Configured JdbcTableSource
*/
public JdbcTableSource build() {
public PhoenixTableSource build() {
checkNotNull(options, "No options supplied.");
checkNotNull(schema, "No schema supplied.");
if (readOptions == null) {
......@@ -274,7 +274,7 @@ public class JdbcTableSource
if (lookupOptions == null) {
lookupOptions = JdbcLookupOptions.builder().build();
}
return new JdbcTableSource(options, readOptions, lookupOptions, schema);
return new PhoenixTableSource(options, readOptions, lookupOptions, schema);
}
}
}
......@@ -125,7 +125,7 @@ public class PhoenixTableSourceSinkFactory
TableSchema schema =
TableSchemaUtils.getPhysicalSchema(descriptorProperties.getTableSchema(SCHEMA));
return JdbcTableSource.builder()
return PhoenixTableSource.builder()
.setOptions(getJdbcOptions(descriptorProperties))
.setReadOptions(getJdbcReadOptions(descriptorProperties))
.setLookupOptions(getJdbcLookupOptions(descriptorProperties))
......@@ -140,8 +140,8 @@ public class PhoenixTableSourceSinkFactory
TableSchema schema =
TableSchemaUtils.getPhysicalSchema(descriptorProperties.getTableSchema(SCHEMA));
final JdbcUpsertTableSink.Builder builder =
JdbcUpsertTableSink.builder()
final PhoenixUpsertTableSink.Builder builder =
PhoenixUpsertTableSink.builder()
.setOptions(getJdbcOptions(descriptorProperties))
.setTableSchema(schema);
......
......@@ -27,8 +27,6 @@ import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.internal.AbstractJdbcOutputFormat;
import org.apache.flink.connector.phoenix.internal.GenericJdbcSinkFunction;
import org.apache.flink.connector.phoenix.internal.JdbcBatchingOutputFormat;
import org.apache.flink.connector.phoenix.internal.PhoenixSinkFunction;
import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.connector.phoenix.internal.options.JdbcOptions;
import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil;
......@@ -48,7 +46,7 @@ import java.util.Objects;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** An upsert {@link UpsertStreamTableSink} for JDBC. */
public class JdbcUpsertTableSink implements UpsertStreamTableSink<Row> {
public class PhoenixUpsertTableSink implements UpsertStreamTableSink<Row> {
private final TableSchema schema;
private final JdbcOptions options;
......@@ -59,7 +57,7 @@ public class JdbcUpsertTableSink implements UpsertStreamTableSink<Row> {
private String[] keyFields;
private boolean isAppendOnly;
private JdbcUpsertTableSink(
private PhoenixUpsertTableSink(
TableSchema schema,
JdbcOptions options,
int flushMaxSize,
......@@ -97,11 +95,6 @@ public class JdbcUpsertTableSink implements UpsertStreamTableSink<Row> {
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
// sql types
int[] jdbcSqlTypes =
Arrays.stream(schema.getFieldTypes())
.mapToInt(JdbcTypeUtil::typeInformationToSqlType)
.toArray();
return dataStream
.addSink(new GenericJdbcSinkFunction<>(newFormat()))
......@@ -166,8 +159,8 @@ public class JdbcUpsertTableSink implements UpsertStreamTableSink<Row> {
+ Arrays.toString(fieldTypes));
}
JdbcUpsertTableSink copy =
new JdbcUpsertTableSink(
PhoenixUpsertTableSink copy =
new PhoenixUpsertTableSink(
schema, options, flushMaxSize, flushIntervalMills, maxRetryTime);
copy.keyFields = keyFields;
return copy;
......@@ -179,8 +172,8 @@ public class JdbcUpsertTableSink implements UpsertStreamTableSink<Row> {
@Override
public boolean equals(Object o) {
if (o instanceof JdbcUpsertTableSink) {
JdbcUpsertTableSink sink = (JdbcUpsertTableSink) o;
if (o instanceof PhoenixUpsertTableSink) {
PhoenixUpsertTableSink sink = (PhoenixUpsertTableSink) o;
return Objects.equals(schema, sink.schema)
&& Objects.equals(options, sink.options)
&& Objects.equals(flushMaxSize, sink.flushMaxSize)
......@@ -193,7 +186,7 @@ public class JdbcUpsertTableSink implements UpsertStreamTableSink<Row> {
}
}
/** Builder for a {@link JdbcUpsertTableSink}. */
/** Builder for a {@link PhoenixUpsertTableSink}. */
public static class Builder {
protected TableSchema schema;
private JdbcOptions options;
......@@ -234,10 +227,10 @@ public class JdbcUpsertTableSink implements UpsertStreamTableSink<Row> {
return this;
}
public JdbcUpsertTableSink build() {
public PhoenixUpsertTableSink build() {
checkNotNull(schema, "No schema supplied.");
checkNotNull(options, "No options supplied.");
return new JdbcUpsertTableSink(
return new PhoenixUpsertTableSink(
schema, options, flushMaxSize, flushIntervalMills, maxRetryTimes);
}
}
......
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.util.Preconditions;
import javax.annotation.concurrent.ThreadSafe;
import javax.transaction.xa.Xid;
import java.util.Objects;
/**
* A pair of checkpoint id and {@link Xid} representing a checkpoint and an associated pending
* (prepared) XA transaction. Thread-safe (assuming immutable {@link Xid} implementation).
*/
@ThreadSafe
@Internal
public final class CheckpointAndXid {
final long checkpointId;
final Xid xid;
final int attempts;
final boolean restored;
public Xid getXid() {
return xid;
}
private CheckpointAndXid(long checkpointId, Xid xid, int attempts, boolean restored) {
this.checkpointId = checkpointId;
this.xid = Preconditions.checkNotNull(xid);
this.attempts = attempts;
this.restored = restored;
}
@Override
public String toString() {
return String.format("checkpointId=%d, xid=%s, restored=%s", checkpointId, xid, restored);
}
CheckpointAndXid asRestored() {
return restored ? this : new CheckpointAndXid(checkpointId, xid, attempts, true);
}
static CheckpointAndXid createRestored(long checkpointId, int attempts, Xid xid) {
return new CheckpointAndXid(checkpointId, xid, attempts, true);
}
static CheckpointAndXid createNew(long checkpointId, Xid xid) {
return new CheckpointAndXid(checkpointId, xid, 0, false);
}
CheckpointAndXid withAttemptsIncremented() {
return new CheckpointAndXid(checkpointId, xid, attempts + 1, restored);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof CheckpointAndXid)) {
return false;
}
CheckpointAndXid that = (CheckpointAndXid) o;
return checkpointId == that.checkpointId
&& attempts == that.attempts
&& restored == that.restored
&& Objects.equals(xid, that.xid);
}
@Override
public int hashCode() {
return Objects.hash(checkpointId, xid, attempts, restored);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.typeutils.SimpleTypeSerializerSnapshot;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.TypeSerializerSnapshot;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataOutputView;
import javax.transaction.xa.Xid;
import java.io.IOException;
import java.util.Objects;
/** {@link CheckpointAndXid} serializer. */
@Internal
public final class CheckpointAndXidSerializer extends TypeSerializer<CheckpointAndXid> {
private static final long serialVersionUID = 1L;
public static final TypeSerializerSnapshot<CheckpointAndXid> SNAPSHOT =
new CheckpointAndXidSimpleTypeSerializerSnapshot();
private final TypeSerializer<Xid> xidSerializer = new XidSerializer();
@Override
public boolean isImmutableType() {
return xidSerializer.isImmutableType();
}
@Override
public TypeSerializer<CheckpointAndXid> duplicate() {
return this;
}
@Override
public CheckpointAndXid createInstance() {
return CheckpointAndXid.createRestored(0L, 0, xidSerializer.createInstance());
}
@Override
public CheckpointAndXid copy(CheckpointAndXid from) {
return CheckpointAndXid.createRestored(
from.checkpointId, from.attempts, xidSerializer.copy(from.xid));
}
@Override
public CheckpointAndXid copy(CheckpointAndXid from, CheckpointAndXid reuse) {
return from;
}
@Override
public int getLength() {
return -1;
}
@Override
public void serialize(CheckpointAndXid record, DataOutputView target) throws IOException {
target.writeLong(record.checkpointId);
target.writeInt(record.attempts);
xidSerializer.serialize(record.xid, target);
}
@Override
public CheckpointAndXid deserialize(DataInputView source) throws IOException {
return CheckpointAndXid.createRestored(
source.readLong(), source.readInt(), xidSerializer.deserialize(source));
}
@Override
public CheckpointAndXid deserialize(CheckpointAndXid reuse, DataInputView source)
throws IOException {
return deserialize(source);
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
serialize(deserialize(source), target);
}
@Override
public boolean equals(Object o) {
return o instanceof CheckpointAndXidSerializer;
}
@Override
public int hashCode() {
return Objects.hash(xidSerializer);
}
@Override
public TypeSerializerSnapshot<CheckpointAndXid> snapshotConfiguration() {
return SNAPSHOT;
}
    /** Simple {@link TypeSerializerSnapshot} for {@link CheckpointAndXidSerializer}. */
public static class CheckpointAndXidSimpleTypeSerializerSnapshot
extends SimpleTypeSerializerSnapshot<CheckpointAndXid> {
private static final int VERSION = 1;
public CheckpointAndXidSimpleTypeSerializerSnapshot() {
super(CheckpointAndXidSerializer::new);
}
@Override
public void writeSnapshot(DataOutputView out) throws IOException {
super.writeSnapshot(out);
out.writeInt(VERSION);
}
@Override
public void readSnapshot(int readVersion, DataInputView in, ClassLoader classLoader)
throws IOException {
super.readSnapshot(readVersion, in, classLoader);
in.readInt();
}
}
}
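/*
 * A minimal round-trip sketch (not part of the connector) for the serializer above. It is written
 * as if it lived in the same package so that CheckpointAndXid's package-private factory is
 * visible, and it assumes XidImpl (used by SemanticXidGenerator below) exposes a
 * (formatId, globalTransactionId, branchQualifier) constructor; the byte values are placeholders.
 */
package org.apache.flink.connector.phoenix.xa;

import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;

import java.io.IOException;

class CheckpointAndXidSerializerSketch {
    public static void main(String[] args) throws IOException {
        CheckpointAndXidSerializer serializer = new CheckpointAndXidSerializer();
        CheckpointAndXid original =
                CheckpointAndXid.createNew(
                        42L, new XidImpl(201, new byte[] {1, 2, 3}, new byte[] {4}));

        // serialize into an in-memory buffer
        DataOutputSerializer out = new DataOutputSerializer(64);
        serializer.serialize(original, out);

        // deserialize; note that deserialize() always recreates the record as "restored"
        CheckpointAndXid copy =
                serializer.deserialize(new DataInputDeserializer(out.getCopyOfBuffer()));
        System.out.println(copy); // checkpointId=42, xid=..., restored=true
    }
}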
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*//*
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.functions.AbstractRichFunction;
import org.apache.flink.api.common.state.CheckpointListener;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.phoenix.JdbcExactlyOnceOptions;
import org.apache.flink.connector.phoenix.JdbcExecutionOptions;
import org.apache.flink.connector.phoenix.JdbcStatementBuilder;
import org.apache.flink.connector.phoenix.internal.JdbcBatchingOutputFormat;
import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor;
import org.apache.flink.connector.phoenix.xa.CheckpointAndXid;
import org.apache.flink.connector.phoenix.xa.XaFacade;
import org.apache.flink.connector.phoenix.xa.XaFacade.EmptyXaTransactionException;
import org.apache.flink.connector.phoenix.xa.XidGenerator;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.transaction.xa.Xid;
import java.io.IOException;
import java.util.*;
import java.util.function.Function;
import static org.apache.flink.connector.phoenix.xa.JdbcXaSinkFunctionState.of;
*/
/**
* JDBC sink function that uses XA transactions to provide exactly once guarantees. That is, if a
* checkpoint succeeds then all records emitted during it are committed in the database, and rolled
* back otherwise.
*
 * <p>Each parallel subtask has its own transactions, independent of other subtasks. Therefore,
* consistency is only guaranteed within partitions.
*
* <p>XA uses a two-phase commit protocol, which solves the consistency problem, but leaves the
* following issues:
*
* <ol>
* <li>transactions may be abandoned, holding resources (e.g. locks, versions of rows)
* <li>abandoned transactions collide with the new transactions if their IDs repeat after recovery
* <li>commit requests may be repeated after job recovery, resulting in error responses and job
* failure
* </ol>
*
* <p>The following table summarizes effects of failures during transaction state transitions and
* ways to mitigate them:
*
* <table border="1" style="width:100%;">
* <col span="1" style="width:15%;">
* <col span="1" style="width:15%;">
* <col span="1" style="width:30%;">
* <col span="1" style="width:40%;">
* <thead>
* <tr>
* <th>Transition</th>
* <th>Methods</th>
* <th>What happens if transition lost</th>
* <th>Ways to mitigate</th>
* </tr>
* </thead>
* <tbody>
* <tr>
* <td>none &gt; started, started &gt; ended</td>
* <td>open(), snapshotState()</td>
* <td>Database eventually discards these transactions</td>
* <td><ol>
* <li>Use globally unique XIDs</li>
* <li>derive XID from: checkpoint id, subtask id, "job id", "run id" (see {@link org.apache.flink.connector.phoenix.xa.SemanticXidGenerator}).</li>
* </ol></td>
* </tr>
* <tr>
* <td>ended &gt; prepared</td>
* <td>snapshotState()</td>
* <td>Database keeps these transactions prepared forever ("in-doubt" state)</td>
* <td>
* <ol>
* <li>store ended transactions in state; rollback on job recovery (still doesn't cover all scenarios)</li>
* <li>call xa_recover() and xa_rollback() on job recovery; disabled by default in order not to affect transactions of other subtasks and apps</li>
* <li>setting transaction timeouts (not supported by most databases)</li>
* <li>manual recovery and rollback</li>
* </ol>
* </td>
* </tr>
* <tr>
* <td>prepared &gt; committed</td>
* <td>open(), notifyCheckpointComplete()</td>
* <td>
* Upon job recovery state contains committed transactions; or JM may notifyCheckpointComplete again after recovery.
* <p>Committing results in {@link javax.transaction.xa.XAException#XAER_NOTA XAER_NOTA} error.</p>
* </td>
* <td>
* Distinguish between transactions created during this run and restored from state and ignore {@link javax.transaction.xa.XAException#XAER_NOTA XAER_NOTA} for the latter.
* </td>
* </tr>
* </tbody>
* </table>
*
* <p>Attention: JdbcXaSinkFunction does not support exactly-once mode with MySQL or other databases
 * that do not support multiple XA transactions per connection. We will improve the support in
* FLINK-22239.
*
* @since 1.13
*//*
@Internal
public class JdbcXaSinkFunction<T> extends AbstractRichFunction
implements CheckpointedFunction, CheckpointListener, SinkFunction<T>, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(JdbcXaSinkFunction.class);
private final org.apache.flink.connector.phoenix.xa.XaFacade xaFacade;
private final org.apache.flink.connector.phoenix.xa.XaGroupOps xaGroupOps;
private final org.apache.flink.connector.phoenix.xa.XidGenerator xidGenerator;
private final JdbcBatchingOutputFormat<T, T, JdbcBatchStatementExecutor<T>> outputFormat;
private final org.apache.flink.connector.phoenix.xa.XaSinkStateHandler stateHandler;
private final JdbcExactlyOnceOptions options;
// checkpoints and the corresponding transactions waiting for completion notification from JM
private transient List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid> preparedXids = new ArrayList<>();
// hanging XIDs - used for cleanup
// it's a list to support retries and scaling down
// possible transaction states: active, idle, prepared
// last element is the current xid
private transient Deque<Xid> hangingXids = new LinkedList<>();
private transient Xid currentXid;
*/
/**
* Creates a {@link JdbcXaSinkFunction}.
*
* <p>All parameters must be {@link java.io.Serializable serializable}.
*
* @param xaFacade {@link org.apache.flink.connector.phoenix.xa.XaFacade} to manage XA transactions
*//*
public JdbcXaSinkFunction(
String sql,
JdbcStatementBuilder<T> statementBuilder,
org.apache.flink.connector.phoenix.xa.XaFacade xaFacade,
JdbcExecutionOptions executionOptions,
JdbcExactlyOnceOptions options) {
this(
new JdbcBatchingOutputFormat<>(
xaFacade,
executionOptions,
context -> {
Preconditions.checkState(
!context.getExecutionConfig().isObjectReuseEnabled(),
"objects can not be reused with JDBC sink function");
return JdbcBatchStatementExecutor.simple(
sql, statementBuilder, Function.identity());
},
JdbcBatchingOutputFormat.RecordExtractor.identity()),
xaFacade,
org.apache.flink.connector.phoenix.xa.XidGenerator.semanticXidGenerator(),
new org.apache.flink.connector.phoenix.xa.XaSinkStateHandlerImpl(),
options,
new org.apache.flink.connector.phoenix.xa.XaGroupOpsImpl(xaFacade));
}
*/
/**
* Creates a {@link JdbcXaSinkFunction}.
*
* <p>All parameters must be {@link java.io.Serializable serializable}.
*
* @param outputFormat {@link JdbcBatchingOutputFormat} to write records with
* @param xaFacade {@link org.apache.flink.connector.phoenix.xa.XaFacade} to manage XA transactions
* @param xidGenerator {@link org.apache.flink.connector.phoenix.xa.XidGenerator} to generate new transaction ids
*//*
public JdbcXaSinkFunction(
JdbcBatchingOutputFormat<T, T, JdbcBatchStatementExecutor<T>> outputFormat,
XaFacade xaFacade,
XidGenerator xidGenerator,
org.apache.flink.connector.phoenix.xa.XaSinkStateHandler stateHandler,
JdbcExactlyOnceOptions options,
org.apache.flink.connector.phoenix.xa.XaGroupOps xaGroupOps) {
this.xaFacade = Preconditions.checkNotNull(xaFacade);
this.xidGenerator = Preconditions.checkNotNull(xidGenerator);
this.outputFormat = Preconditions.checkNotNull(outputFormat);
this.stateHandler = Preconditions.checkNotNull(stateHandler);
this.options = Preconditions.checkNotNull(options);
this.xaGroupOps = xaGroupOps;
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
org.apache.flink.connector.phoenix.xa.JdbcXaSinkFunctionState state = stateHandler.load(context);
hangingXids = new LinkedList<>(state.getHanging());
preparedXids = new ArrayList<>(state.getPrepared());
LOG.info(
"initialized state: prepared xids: {}, hanging xids: {}",
preparedXids.size(),
hangingXids.size());
}
@Override
public void open(Configuration configuration) throws Exception {
super.open(configuration);
xidGenerator.open();
xaFacade.open();
hangingXids = new LinkedList<>(xaGroupOps.failOrRollback(hangingXids).getForRetry());
commitUpToCheckpoint(Optional.empty());
if (options.isDiscoverAndRollbackOnRecovery()) {
// Pending transactions which are not included into the checkpoint might hold locks and
// should be rolled back. However, rolling back ALL transactions can cause data loss. So
// each subtask first commits transactions from its state and then rolls back discovered
// transactions if they belong to it.
xaGroupOps.recoverAndRollback(getRuntimeContext(), xidGenerator);
}
beginTx(0L);
outputFormat.setRuntimeContext(getRuntimeContext());
// open format only after starting the transaction so it gets a ready to use connection
outputFormat.open(
getRuntimeContext().getIndexOfThisSubtask(),
getRuntimeContext().getNumberOfParallelSubtasks());
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
LOG.debug("snapshot state, checkpointId={}", context.getCheckpointId());
prepareCurrentTx(context.getCheckpointId());
beginTx(context.getCheckpointId() + 1);
stateHandler.store(of(preparedXids, hangingXids));
}
@Override
public void notifyCheckpointComplete(long checkpointId) {
commitUpToCheckpoint(Optional.of(checkpointId));
}
@Override
public void invoke(T value, Context context) throws IOException {
Preconditions.checkState(currentXid != null, "current xid must not be null");
if (LOG.isTraceEnabled()) {
LOG.trace("invoke, xid: {}, value: {}", currentXid, value);
}
outputFormat.writeRecord(value);
}
@Override
public void close() throws Exception {
super.close();
if (currentXid != null && xaFacade.isOpen()) {
try {
LOG.debug("remove current transaction before closing, xid={}", currentXid);
xaFacade.failAndRollback(currentXid);
} catch (Exception e) {
LOG.warn("unable to fail/rollback current transaction, xid={}", currentXid, e);
}
}
xaFacade.close();
xidGenerator.close();
// don't format.close(); as we don't want neither to flush nor to close connection here
currentXid = null;
hangingXids = null;
preparedXids = null;
}
private void prepareCurrentTx(long checkpointId) throws IOException {
Preconditions.checkState(currentXid != null, "no current xid");
Preconditions.checkState(
!hangingXids.isEmpty() && hangingXids.peekLast().equals(currentXid),
"inconsistent internal state");
hangingXids.pollLast();
outputFormat.flush();
try {
xaFacade.endAndPrepare(currentXid);
preparedXids.add(org.apache.flink.connector.phoenix.xa.CheckpointAndXid.createNew(checkpointId, currentXid));
} catch (EmptyXaTransactionException e) {
LOG.info(
"empty XA transaction (skip), xid: {}, checkpoint {}",
currentXid,
checkpointId);
} catch (Exception e) {
ExceptionUtils.rethrowIOException(e);
}
currentXid = null;
}
*/
/** @param checkpointId to associate with the new transaction. *//*
private void beginTx(long checkpointId) throws Exception {
Preconditions.checkState(currentXid == null, "currentXid not null");
currentXid = xidGenerator.generateXid(getRuntimeContext(), checkpointId);
hangingXids.offerLast(currentXid);
xaFacade.start(currentXid);
if (checkpointId > 0) {
// associate outputFormat with a new connection that might have been opened in start()
outputFormat.updateExecutor(false);
}
}
private void commitUpToCheckpoint(Optional<Long> checkpointInclusive) {
Tuple2<List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid>, List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid>> splittedXids =
split(preparedXids, checkpointInclusive, true);
if (splittedXids.f0.isEmpty()) {
checkpointInclusive.ifPresent(
cp -> LOG.warn("nothing to commit up to checkpoint: {}", cp));
} else {
preparedXids = splittedXids.f1;
preparedXids.addAll(
xaGroupOps
.commit(
splittedXids.f0,
options.isAllowOutOfOrderCommits(),
options.getMaxCommitAttempts())
.getForRetry());
}
}
private Tuple2<List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid>, List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid>> split(
List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid> list,
Optional<Long> checkpointInclusive,
boolean checkpointIntoLo) {
return checkpointInclusive
.map(cp -> split(preparedXids, cp, checkpointIntoLo))
.orElse(new Tuple2<>(list, new ArrayList<>()));
}
private Tuple2<List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid>, List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid>> split(
List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid> list, long checkpoint, boolean checkpointIntoLo) {
List<org.apache.flink.connector.phoenix.xa.CheckpointAndXid> lo = new ArrayList<>(list.size() / 2);
List<CheckpointAndXid> hi = new ArrayList<>(list.size() / 2);
list.forEach(
i -> {
if (i.checkpointId < checkpoint
|| (i.checkpointId == checkpoint && checkpointIntoLo)) {
lo.add(i);
} else {
hi.add(i);
}
});
return new Tuple2<>(lo, hi);
}
}
*/
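/*
 * A minimal sketch (not part of the connector) of the split performed by commitUpToCheckpoint()
 * in the commented-out sink above: prepared transactions whose checkpoint id is at or below the
 * completed checkpoint are committed, the rest are kept for a later notification. Plain longs
 * stand in for the CheckpointAndXid entries purely for illustration.
 */
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class CommitUpToCheckpointSketch {
    public static void main(String[] args) {
        List<Long> preparedCheckpointIds = Arrays.asList(3L, 4L, 5L, 6L);
        long completedCheckpoint = 5L;

        List<Long> toCommit = new ArrayList<>();
        List<Long> toKeep = new ArrayList<>();
        for (long id : preparedCheckpointIds) {
            (id <= completedCheckpoint ? toCommit : toKeep).add(id);
        }
        // prints: commit=[3, 4, 5], keep=[6]
        System.out.println("commit=" + toCommit + ", keep=" + toKeep);
    }
}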
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import javax.annotation.concurrent.ThreadSafe;
import javax.transaction.xa.Xid;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import static java.util.Collections.unmodifiableCollection;
/** Thread-safe (assuming immutable {@link Xid} implementation). */
@ThreadSafe
class JdbcXaSinkFunctionState {
private final Collection<CheckpointAndXid> prepared;
private final Collection<Xid> hanging;
static JdbcXaSinkFunctionState empty() {
return new JdbcXaSinkFunctionState(Collections.emptyList(), Collections.emptyList());
}
static JdbcXaSinkFunctionState of(
Collection<CheckpointAndXid> prepared, Collection<Xid> hanging) {
return new JdbcXaSinkFunctionState(
unmodifiableCollection(new ArrayList<>(prepared)),
unmodifiableCollection(new ArrayList<>(hanging)));
}
private JdbcXaSinkFunctionState(
Collection<CheckpointAndXid> prepared, Collection<Xid> hanging) {
this.prepared = prepared;
this.hanging = hanging;
}
/**
* @return immutable collection of prepared XA transactions to {@link
* javax.transaction.xa.XAResource#commit commit}.
*/
public Collection<CheckpointAndXid> getPrepared() {
return prepared;
}
/**
* @return immutable collection of XA transactions to {@link
* javax.transaction.xa.XAResource#rollback rollback} (if they were prepared) or {@link
* javax.transaction.xa.XAResource#end end} (if they were only started).
*/
Collection<Xid> getHanging() {
return hanging;
}
@Override
public String toString() {
return "prepared=" + prepared + ", hanging=" + hanging;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.functions.RuntimeContext;
import javax.transaction.xa.Xid;
import java.security.SecureRandom;
import java.util.Arrays;
/**
* Generates {@link Xid} from:
*
* <ol>
* <li>To provide uniqueness over other jobs and apps, and other instances of this job, the
* gtrid consists of
* <ol>
* <li>job id (16 bytes)
* <li>subtask index (4 bytes)
* <li>checkpoint id (8 bytes)
* </ol>
* <li>the bqual consists of 4 random bytes (generated using {@link SecureRandom})
* </ol>
*
* <p>Each {@link SemanticXidGenerator} instance MUST be used for only one Sink (otherwise Xids will
* collide).
*/
@Internal
class SemanticXidGenerator implements XidGenerator {
private static final long serialVersionUID = 1L;
private static final SecureRandom SECURE_RANDOM = new SecureRandom();
private static final int FORMAT_ID = 201;
private transient byte[] gtridBuffer;
private transient byte[] bqualBuffer;
@Override
public void open() {
// globalTransactionId = job id + task index + checkpoint id
gtridBuffer = new byte[JobID.SIZE + Integer.BYTES + Long.BYTES];
// branchQualifier = random bytes
bqualBuffer = getRandomBytes(Integer.BYTES);
}
@Override
public Xid generateXid(RuntimeContext runtimeContext, long checkpointId) {
byte[] jobIdBytes = runtimeContext.getJobId().getBytes();
System.arraycopy(jobIdBytes, 0, gtridBuffer, 0, JobID.SIZE);
writeNumber(runtimeContext.getIndexOfThisSubtask(), Integer.BYTES, gtridBuffer, JobID.SIZE);
writeNumber(checkpointId, Long.BYTES, gtridBuffer, JobID.SIZE + Integer.BYTES);
// relying on arrays copying inside XidImpl constructor
return new XidImpl(FORMAT_ID, gtridBuffer, bqualBuffer);
}
@Override
public boolean belongsToSubtask(Xid xid, RuntimeContext ctx) {
if (xid.getFormatId() != FORMAT_ID) {
return false;
}
int subtaskIndex = readNumber(xid.getGlobalTransactionId(), JobID.SIZE, Integer.BYTES);
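// Xids whose subtask index lies beyond the current parallelism are intentionally not rejected
// here, so transactions left over from a run with higher parallelism can still be claimed and
// cleaned up.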
if (subtaskIndex != ctx.getIndexOfThisSubtask()
&& subtaskIndex <= ctx.getNumberOfParallelSubtasks() - 1) {
return false;
}
byte[] jobIdBytes = new byte[JobID.SIZE];
System.arraycopy(xid.getGlobalTransactionId(), 0, jobIdBytes, 0, JobID.SIZE);
return Arrays.equals(jobIdBytes, ctx.getJobId().getBytes());
}
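// readNumber/writeNumber encode integers in little-endian byte order within the gtrid buffer.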
private static int readNumber(byte[] bytes, int offset, int numBytes) {
int result = 0;
for (int i = 0; i < numBytes; i++) {
result |= (bytes[offset + i] & 0xff) << Byte.SIZE * i;
}
return result;
}
private static void writeNumber(long number, int numBytes, byte[] dst, int dstOffset) {
for (int i = dstOffset; i < dstOffset + numBytes; i++) {
dst[i] = (byte) number;
number >>>= Byte.SIZE;
}
}
private byte[] getRandomBytes(int size) {
byte[] bytes = new byte[size];
SECURE_RANDOM.nextBytes(bytes);
return bytes;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider;
import org.apache.flink.util.FlinkRuntimeException;
import javax.sql.XADataSource;
import javax.transaction.xa.XAException;
import javax.transaction.xa.Xid;
import java.io.Serializable;
import java.util.Collection;
import java.util.function.Supplier;
/**
* Facade to the XA operations relevant to {@link
* org.apache.flink.streaming.api.functions.sink.SinkFunction sink}.
*
* <p>Typical workflow:
*
* <ol>
* <li>{@link #open}
* <li>{@link #start} transaction
* <li>{@link #getConnection}, write some data
* <li>{@link #endAndPrepare} (or {@link #failAndRollback})
* <li>{@link #commit} / {@link #rollback}
* <li>{@link #close}
* </ol>
*
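* <p>A minimal usage sketch (not taken from this codebase; {@code myXaDataSource} and
* {@code xid} are placeholders supplied by the caller):
*
* <pre>{@code
* XaFacade facade = XaFacade.fromXaDataSourceSupplier(() -> myXaDataSource, null, false);
* facade.open();
* facade.start(xid);
* // write data through facade.getConnection() ...
* facade.endAndPrepare(xid);
* facade.commit(xid, false);
* facade.close();
* }</pre>
*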
* {@link #recover} can be used to get abandoned prepared transactions for cleanup.
*/
@Internal
public interface XaFacade extends JdbcConnectionProvider, Serializable, AutoCloseable {
/** @return a non-serializable instance. */
static XaFacade fromXaDataSourceSupplier(
Supplier<XADataSource> dataSourceSupplier,
Integer timeoutSec,
boolean transactionPerConnection) {
return transactionPerConnection
? new XaFacadePoolingImpl(() -> new XaFacadeImpl(dataSourceSupplier, timeoutSec))
: new XaFacadeImpl(dataSourceSupplier, timeoutSec);
}
void open() throws Exception;
boolean isOpen();
/** Start a new transaction. */
void start(Xid xid) throws Exception;
/** End and then prepare the transaction. Transaction can't be resumed afterwards. */
void endAndPrepare(Xid xid) throws Exception;
/**
* Commit previously prepared transaction.
*
* @param ignoreUnknown whether to ignore {@link XAException#XAER_NOTA
* XAER_NOTA} error.
*/
void commit(Xid xid, boolean ignoreUnknown) throws TransientXaException;
/** Rollback previously prepared transaction. */
void rollback(Xid xid) throws TransientXaException;
/**
* End transaction as {@link javax.transaction.xa.XAResource#TMFAIL failed}; in case of error,
* try to roll it back.
*/
void failAndRollback(Xid xid) throws TransientXaException;
/**
* Note: this can block on some non-MVCC databases if there are transactions that were ended but
* not prepared.
*/
Collection<Xid> recover() throws TransientXaException;
/**
* Thrown by {@link XaFacade} when RM responds with {@link
* javax.transaction.xa.XAResource#XA_RDONLY XA_RDONLY} indicating that the transaction doesn't
* include any changes. When such a transaction is committed, the RM may return an error (usually,
* {@link XAException#XAER_NOTA XAER_NOTA}).
*/
class EmptyXaTransactionException extends FlinkRuntimeException {
private final Xid xid;
EmptyXaTransactionException(Xid xid) {
super("end response XA_RDONLY, xid: " + xid);
this.xid = xid;
}
public Xid getXid() {
return xid;
}
}
/**
* Indicates a transient or unknown failure from the resource manager (see {@link
* XAException#XA_RBTRANSIENT XA_RBTRANSIENT}, {@link XAException#XAER_RMFAIL XAER_RMFAIL}).
*/
class TransientXaException extends FlinkRuntimeException {
TransientXaException(XAException cause) {
super(cause);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.Preconditions;
import org.apache.flink.util.function.ThrowingRunnable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.concurrent.NotThreadSafe;
import javax.sql.XAConnection;
import javax.sql.XADataSource;
import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import static java.util.Optional.empty;
import static java.util.Optional.of;
import static javax.transaction.xa.XAException.XAER_NOTA;
import static javax.transaction.xa.XAException.XAER_RMFAIL;
import static javax.transaction.xa.XAException.XA_HEURCOM;
import static javax.transaction.xa.XAException.XA_HEURHAZ;
import static javax.transaction.xa.XAException.XA_HEURMIX;
import static javax.transaction.xa.XAException.XA_HEURRB;
import static javax.transaction.xa.XAException.XA_RBBASE;
import static javax.transaction.xa.XAException.XA_RBTIMEOUT;
import static javax.transaction.xa.XAException.XA_RBTRANSIENT;
import static javax.transaction.xa.XAResource.TMENDRSCAN;
import static javax.transaction.xa.XAResource.TMNOFLAGS;
import static javax.transaction.xa.XAResource.TMSTARTRSCAN;
/** Default {@link XaFacade} implementation. */
@NotThreadSafe
@Internal
class XaFacadeImpl implements XaFacade {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(XaFacadeImpl.class);
private static final Set<Integer> TRANSIENT_ERR_CODES =
new HashSet<>(Arrays.asList(XA_RBTRANSIENT, XAER_RMFAIL));
private static final Set<Integer> HEUR_ERR_CODES =
new HashSet<>(Arrays.asList(XA_HEURRB, XA_HEURCOM, XA_HEURHAZ, XA_HEURMIX));
private static final int MAX_RECOVER_CALLS = 100;
private final Supplier<XADataSource> dataSourceSupplier;
private final Integer timeoutSec;
private transient XAResource xaResource;
private transient Connection connection;
private transient XAConnection xaConnection;
@VisibleForTesting
static XaFacadeImpl fromXaDataSource(XADataSource ds) {
return new XaFacadeImpl(() -> ds, null);
}
XaFacadeImpl(Supplier<XADataSource> dataSourceSupplier, Integer timeoutSec) {
this.dataSourceSupplier = Preconditions.checkNotNull(dataSourceSupplier);
this.timeoutSec = timeoutSec;
}
@Override
public void open() throws SQLException {
Preconditions.checkState(!isOpen(), "already connected");
XADataSource ds = dataSourceSupplier.get();
xaConnection = ds.getXAConnection();
xaResource = xaConnection.getXAResource();
if (timeoutSec != null) {
try {
xaResource.setTransactionTimeout(timeoutSec);
} catch (XAException e) {
throw new SQLException(e);
}
}
connection = xaConnection.getConnection();
connection.setReadOnly(false);
connection.setAutoCommit(false);
Preconditions.checkState(!connection.getAutoCommit());
}
@Override
public void close() throws SQLException {
if (connection != null) {
connection.close(); // close connection - likely a wrapper
connection = null;
}
try {
xaConnection.close(); // close likely a pooled AND the underlying connection
} catch (SQLException e) {
// Some databases (e.g. MySQL) rollback changes on normal client disconnect which
// causes an exception if an XA transaction was prepared. Note that resources are
// still released in case of an error. Pinning MySQL connections doesn't help as
// SuspendableXAConnection has the same close() logic.
// Other DBs don't rollback, e.g. for PgSql the previous connection.close() call
// disassociates the connection (and that call works because it has a check for XA)
// and rollback() is not called.
// In either case, not closing the XA connection here leads to a resource leak.
LOG.warn("unable to close XA connection", e);
}
xaResource = null;
}
@Override
public Connection getConnection() {
Preconditions.checkNotNull(connection);
return connection;
}
@Override
public boolean isConnectionValid() throws SQLException {
return isOpen() && connection.isValid(connection.getNetworkTimeout());
}
@Override
public Connection getOrEstablishConnection() throws SQLException {
if (!isOpen()) {
open();
}
return connection;
}
@Override
public void closeConnection() {
try {
close();
} catch (SQLException e) {
LOG.warn("Connection close failed.", e);
}
}
@Override
public Connection reestablishConnection() {
throw new UnsupportedOperationException();
}
@Override
public void start(Xid xid) {
execute(Command.fromRunnable("start", xid, () -> xaResource.start(xid, TMNOFLAGS)));
}
@Override
public void endAndPrepare(Xid xid) {
execute(Command.fromRunnable("end", xid, () -> xaResource.end(xid, XAResource.TMSUCCESS)));
int prepResult = execute(new Command<>("prepare", of(xid), () -> xaResource.prepare(xid)));
if (prepResult == XAResource.XA_RDONLY) {
throw new EmptyXaTransactionException(xid);
} else if (prepResult != XAResource.XA_OK) {
throw new FlinkRuntimeException(
formatErrorMessage("prepare", of(xid), empty(), "response: " + prepResult));
}
}
@Override
public void failAndRollback(Xid xid) {
execute(
Command.fromRunnable(
"end (fail)",
xid,
() -> {
xaResource.end(xid, XAResource.TMFAIL);
xaResource.rollback(xid);
},
err -> {
if (err.errorCode >= XA_RBBASE) {
rollback(xid);
} else {
LOG.warn(
formatErrorMessage(
"end (fail)", of(xid), of(err.errorCode)));
}
}));
}
@Override
public void commit(Xid xid, boolean ignoreUnknown) {
execute(
Command.fromRunnableRecoverByWarn(
"commit",
xid,
() ->
xaResource.commit(
xid,
false /* not onePhase because the transaction should be prepared already */),
e -> buildCommitErrorDesc(e, ignoreUnknown)));
}
@Override
public void rollback(Xid xid) {
execute(
Command.fromRunnableRecoverByWarn(
"rollback",
xid,
() -> xaResource.rollback(xid),
this::buildRollbackErrorDesc));
}
private void forget(Xid xid) {
execute(
Command.fromRunnableRecoverByWarn(
"forget",
xid,
() -> xaResource.forget(xid),
e -> of("manual cleanup may be required")));
}
@Override
public Collection<Xid> recover() {
return execute(
new Command<>(
"recover",
empty(),
() -> {
// copy into a mutable list: recover(int) returns a fixed-size Arrays.asList view
List<Xid> list = new ArrayList<>(recover(TMSTARTRSCAN));
try {
for (int i = 0; list.addAll(recover(TMNOFLAGS)); i++) {
// H2 sometimes returns the same tx list here - should probably use
// recover(TMSTARTRSCAN | TMENDRSCAN)
Preconditions.checkState(
i < MAX_RECOVER_CALLS, "too many xa_recover() calls");
}
} finally {
recover(TMENDRSCAN);
}
return list;
}));
}
@Override
public boolean isOpen() {
return xaResource != null;
}
private List<Xid> recover(int flags) throws XAException {
return Arrays.asList(xaResource.recover(flags));
}
private <T> T execute(Command<T> cmd) throws FlinkRuntimeException {
Preconditions.checkState(isOpen(), "not connected");
LOG.debug("{}, xid={}", cmd.name, cmd.xid);
try {
T result = cmd.callable.call();
LOG.trace("{} succeeded , xid={}", cmd.name, cmd.xid);
return result;
} catch (XAException e) {
if (HEUR_ERR_CODES.contains(e.errorCode)) {
cmd.xid.ifPresent(this::forget);
}
return cmd.recover.apply(e).orElseThrow(() -> wrapException(cmd.name, cmd.xid, e));
} catch (FlinkRuntimeException e) {
throw e;
} catch (Exception e) {
throw wrapException(cmd.name, cmd.xid, e);
}
}
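/**
* A named XA operation: an optional xid, the callable performing the actual call, and a recovery
* function that may map an {@link XAException} to a fallback result instead of failing.
*/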
private static class Command<T> {
private final String name;
private final Optional<Xid> xid;
private final Callable<T> callable;
private final Function<XAException, Optional<T>> recover;
static Command<Object> fromRunnable(
String action, Xid xid, ThrowingRunnable<XAException> runnable) {
return fromRunnable(
action,
xid,
runnable,
e -> {
throw wrapException(action, of(xid), e);
});
}
static Command<Object> fromRunnableRecoverByWarn(
String action,
Xid xid,
ThrowingRunnable<XAException> runnable,
Function<XAException, Optional<String>> err2msg) {
return fromRunnable(
action,
xid,
runnable,
e ->
LOG.warn(
formatErrorMessage(
action,
of(xid),
of(e.errorCode),
err2msg.apply(e)
.orElseThrow(
() ->
wrapException(
action, of(xid), e)))));
}
private static Command<Object> fromRunnable(
String action,
Xid xid,
ThrowingRunnable<XAException> runnable,
Consumer<XAException> recover) {
return new Command<>(
action,
of(xid),
() -> {
runnable.run();
return null;
},
e -> {
recover.accept(e);
return Optional.of("");
});
}
private Command(String name, Optional<Xid> xid, Callable<T> callable) {
this(name, xid, callable, e -> empty());
}
private Command(
String name,
Optional<Xid> xid,
Callable<T> callable,
Function<XAException, Optional<T>> recover) {
this.name = name;
this.xid = xid;
this.callable = callable;
this.recover = recover;
}
}
private static FlinkRuntimeException wrapException(
String action, Optional<Xid> xid, Exception ex) {
if (ex instanceof XAException) {
XAException xa = (XAException) ex;
if (TRANSIENT_ERR_CODES.contains(xa.errorCode)) {
throw new TransientXaException(xa);
} else {
throw new FlinkRuntimeException(
formatErrorMessage(action, xid, of(xa.errorCode), xa.getMessage()));
}
} else {
throw new FlinkRuntimeException(
formatErrorMessage(action, xid, empty(), ex.getMessage()), ex);
}
}
private Optional<String> buildCommitErrorDesc(XAException err, boolean ignoreUnknown) {
if (err.errorCode == XA_HEURCOM) {
return Optional.of("transaction was heuristically committed earlier");
} else if (ignoreUnknown && err.errorCode == XAER_NOTA) {
return Optional.of("transaction is unknown to RM (ignoring)");
} else {
return empty();
}
}
private Optional<String> buildRollbackErrorDesc(XAException err) {
if (err.errorCode == XA_HEURRB) {
return Optional.of("transaction was already heuristically rolled back");
} else if (err.errorCode >= XA_RBBASE) {
return Optional.of("transaction was already marked for rollback");
} else {
return empty();
}
}
private static String formatErrorMessage(
String action, Optional<Xid> xid, Optional<Integer> errorCode, String... more) {
return String.format(
"unable to %s%s%s%s",
action,
xid.map(x -> " XA transaction, xid: " + x).orElse(""),
errorCode
.map(code -> String.format(", error %d: %s", code, descError(code)))
.orElse(""),
more == null || more.length == 0 ? "" : ". " + Arrays.toString(more));
}
/** @return error description taken from the {@link XAException} javadoc, to ease debugging. */
private static String descError(int code) {
switch (code) {
case XA_HEURCOM:
return "heuristic commit decision was made";
case XAException.XA_HEURHAZ:
return "heuristic decision may have been made";
case XAException.XA_HEURMIX:
return "heuristic mixed decision was made";
case XA_HEURRB:
return "heuristic rollback decision was made";
case XAException.XA_NOMIGRATE:
return "the transaction resumption must happen where the suspension occurred";
case XAException.XA_RBCOMMFAIL:
return "rollback happened due to a communications failure";
case XAException.XA_RBDEADLOCK:
return "rollback happened because deadlock was detected";
case XAException.XA_RBINTEGRITY:
return "rollback happened because an internal integrity check failed";
case XAException.XA_RBOTHER:
return "rollback happened for some reason not fitting any of the other rollback error codes";
case XAException.XA_RBPROTO:
return "rollback happened due to a protocol error in the resource manager";
case XAException.XA_RBROLLBACK:
return "rollback happened for an unspecified reason";
case XA_RBTIMEOUT:
return "rollback happened because of a timeout";
case XA_RBTRANSIENT:
return "rollback happened due to a transient failure";
case XAException.XA_RDONLY:
return "the transaction branch was read-only, and has already been committed";
case XAException.XA_RETRY:
return "the method invoked returned without having any effect, and that it may be invoked again";
case XAException.XAER_ASYNC:
return "an asynchronous operation is outstanding";
case XAException.XAER_DUPID:
return "Xid given as an argument is already known to the resource manager";
case XAException.XAER_INVAL:
return "invalid arguments were passed";
case XAER_NOTA:
return "Xid is not valid";
case XAException.XAER_OUTSIDE:
return "the resource manager is doing work outside the global transaction";
case XAException.XAER_PROTO:
return "protocol error";
case XAException.XAER_RMERR:
return "resource manager error has occurred";
case XAException.XAER_RMFAIL:
return "the resource manager has failed and is not available";
default:
return "";
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.util.function.ThrowingConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import javax.transaction.xa.Xid;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collection;
import java.util.Deque;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.function.Supplier;
import static org.apache.flink.util.ExceptionUtils.rethrow;
import static org.apache.flink.util.Preconditions.checkState;
/**
* A "pooling" implementation of {@link XaFacade}. Some database implement XA such that one
* connection is limited to a single transaction. As a workaround, this implementation creates a new
* XA resource after each xa_start call is made (and associates it with the xid to commit later).
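*
* <p>{@link #start} takes a facade from the pool or opens a new one; after {@link #commit},
* {@link #rollback} or {@link #failAndRollback} the facade mapped to the xid is returned to the
* pool for reuse.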
*/
@Internal
class XaFacadePoolingImpl implements XaFacade {
private static final long serialVersionUID = 1L;
public interface FacadeSupplier extends Serializable, Supplier<XaFacade> {}
private static final Logger LOG = LoggerFactory.getLogger(XaFacadePoolingImpl.class);
private final FacadeSupplier facadeSupplier;
private transient XaFacade active;
private transient Map<Xid, XaFacade> mappedToXids;
private transient Deque<XaFacade> pooled;
XaFacadePoolingImpl(FacadeSupplier facadeSupplier) {
this.facadeSupplier = facadeSupplier;
}
@Override
public void open() throws Exception {
checkState(active == null);
pooled = new LinkedList<>();
mappedToXids = new HashMap<>();
}
@Override
public boolean isOpen() {
return active != null && active.isOpen();
}
@Override
public void start(Xid xid) throws Exception {
checkState(active == null);
if (pooled.isEmpty()) {
active = facadeSupplier.get();
active.open();
} else {
active = pooled.poll();
}
active.start(xid);
mappedToXids.put(xid, active);
}
/**
* Must be called after {@link #start(Xid)} with the same {@link Xid}.
*
* @see XaFacade#endAndPrepare(Xid)
*/
@Override
public void endAndPrepare(Xid xid) throws Exception {
checkState(active == mappedToXids.get(xid));
try {
active.endAndPrepare(xid);
} finally {
active = null;
}
}
@Override
public void commit(Xid xid, boolean ignoreUnknown) throws TransientXaException {
runForXid(xid, facade -> facade.commit(xid, ignoreUnknown));
}
@Override
public void rollback(Xid xid) throws TransientXaException {
runForXid(xid, facade -> facade.rollback(xid));
}
@Override
public void failAndRollback(Xid xid) throws TransientXaException {
runForXid(xid, facade -> facade.failAndRollback(xid));
}
@Override
public Collection<Xid> recover() throws TransientXaException {
return peekPooled().recover();
}
@Override
public void close() throws Exception {
for (XaFacade facade : mappedToXids.values()) {
facade.close();
}
for (XaFacade facade : pooled) {
facade.close();
}
if (active != null && active.isOpen()) {
active.close();
}
}
@Nullable
@Override
public Connection getConnection() {
return active.getConnection();
}
@Override
public boolean isConnectionValid() throws SQLException {
return active.isConnectionValid();
}
@Override
public Connection getOrEstablishConnection() throws SQLException, ClassNotFoundException {
return active.getOrEstablishConnection();
}
@Override
public void closeConnection() {
active.closeConnection();
}
@Override
public Connection reestablishConnection() throws SQLException, ClassNotFoundException {
return active.reestablishConnection();
}
// WARN: action MUST leave the facade in IDLE state (i.e. not start/end/prepare any tx)
private void runForXid(Xid xid, ThrowingConsumer<XaFacade, TransientXaException> action) {
XaFacade mapped = mappedToXids.remove(xid);
if (mapped == null) {
// a transaction may not be known during recovery
LOG.debug("No XA resource found associated with XID: {}", xid);
action.accept(peekPooled());
} else {
LOG.debug("Found mapped XA resource for XID: {} {}", xid, mapped);
try {
action.accept(mapped);
} finally {
pooled.offer(mapped);
}
}
}
// WARN: the returned facade MUST be left in IDLE state (i.e. not start/end/prepare any tx)
private XaFacade peekPooled() {
XaFacade xaFacade = pooled.peek();
if (xaFacade == null) {
xaFacade = facadeSupplier.get();
try {
xaFacade.open();
} catch (Exception e) {
rethrow(e);
}
pooled.offer(xaFacade);
}
return xaFacade;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.util.FlinkRuntimeException;
import javax.transaction.xa.Xid;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
@Internal
interface XaGroupOps extends Serializable {
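/**
* Commit the given prepared transactions, stopping at the first failure unless out-of-order
* commits are allowed; transactions that failed transiently are returned for retry.
*/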
GroupXaOperationResult<CheckpointAndXid> commit(
List<CheckpointAndXid> xids, boolean allowOutOfOrderCommits, int maxCommitAttempts);
GroupXaOperationResult<Xid> failOrRollback(Collection<Xid> xids);
void recoverAndRollback(RuntimeContext runtimeContext, XidGenerator xidGenerator);
class GroupXaOperationResult<T> {
private final List<T> succeeded = new ArrayList<>();
private final List<T> failed = new ArrayList<>();
private final List<T> toRetry = new ArrayList<>();
private Optional<Exception> failure = Optional.empty();
private Optional<Exception> transientFailure = Optional.empty();
void failedTransiently(T x, XaFacade.TransientXaException e) {
toRetry.add(x);
transientFailure =
getTransientFailure().isPresent() ? getTransientFailure() : Optional.of(e);
}
void failed(T x, Exception e) {
failed.add(x);
failure = failure.isPresent() ? failure : Optional.of(e);
}
void succeeded(T x) {
succeeded.add(x);
}
private FlinkRuntimeException wrapFailure(
Exception error, String formatWithCounts, int errCount) {
return new FlinkRuntimeException(
String.format(formatWithCounts, errCount, total()), error);
}
private int total() {
return succeeded.size() + failed.size() + toRetry.size();
}
List<T> getForRetry() {
return toRetry;
}
Optional<Exception> getTransientFailure() {
return transientFailure;
}
boolean hasNoFailures() {
return !failure.isPresent() && !transientFailure.isPresent();
}
void throwIfAnyFailed(String action) {
failure.map(
f ->
wrapFailure(
f,
"failed to " + action + " %d transactions out of %d",
toRetry.size() + failed.size()))
.ifPresent(
f -> {
throw f;
});
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.connector.phoenix.xa.XaFacade.TransientXaException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.transaction.xa.Xid;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
@Internal
class XaGroupOpsImpl implements XaGroupOps {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(XaGroupOpsImpl.class);
private final XaFacade xaFacade;
XaGroupOpsImpl(XaFacade xaFacade) {
this.xaFacade = xaFacade;
}
@Override
public GroupXaOperationResult<CheckpointAndXid> commit(
List<CheckpointAndXid> xids, boolean allowOutOfOrderCommits, int maxCommitAttempts) {
GroupXaOperationResult<CheckpointAndXid> result = new GroupXaOperationResult<>();
int origSize = xids.size();
LOG.debug("commit {} transactions", origSize);
for (Iterator<CheckpointAndXid> i = xids.iterator();
i.hasNext() && (result.hasNoFailures() || allowOutOfOrderCommits); ) {
CheckpointAndXid x = i.next();
i.remove();
try {
xaFacade.commit(x.xid, x.restored);
result.succeeded(x);
} catch (TransientXaException e) {
result.failedTransiently(x.withAttemptsIncremented(), e);
} catch (Exception e) {
result.failed(x, e);
}
}
result.getForRetry().addAll(xids);
result.throwIfAnyFailed("commit");
throwIfAnyReachedMaxAttempts(result, maxCommitAttempts);
result.getTransientFailure()
.ifPresent(
f ->
LOG.warn(
"failed to commit {} transactions out of {} (keep them to retry later)",
result.getForRetry().size(),
origSize,
f));
return result;
}
@Override
public GroupXaOperationResult<Xid> failOrRollback(Collection<Xid> xids) {
GroupXaOperationResult<Xid> result = new GroupXaOperationResult<>();
if (xids.isEmpty()) {
return result;
}
if (LOG.isDebugEnabled()) {
LOG.debug("rolling back {} transactions: {}", xids.size(), xids);
}
for (Xid x : xids) {
try {
xaFacade.failAndRollback(x);
result.succeeded(x);
} catch (TransientXaException e) {
LOG.info("unable to fail/rollback transaction, xid={}: {}", x, e.getMessage());
result.failedTransiently(x, e);
} catch (Exception e) {
LOG.warn("unable to fail/rollback transaction, xid={}: {}", x, e.getMessage());
result.failed(x, e);
}
}
if (!result.getForRetry().isEmpty()) {
LOG.info("failed to roll back {} transactions", result.getForRetry().size());
}
return result;
}
@Override
public void recoverAndRollback(RuntimeContext runtimeContext, XidGenerator xidGenerator) {
Collection<Xid> recovered = xaFacade.recover();
if (recovered.isEmpty()) {
return;
}
LOG.warn("rollback {} recovered transactions", recovered.size());
for (Xid xid : recovered) {
if (xidGenerator.belongsToSubtask(xid, runtimeContext)) {
try {
xaFacade.rollback(xid);
} catch (Exception e) {
LOG.info("unable to rollback recovered transaction, xid={}", xid, e);
}
}
}
}
private static void throwIfAnyReachedMaxAttempts(
GroupXaOperationResult<CheckpointAndXid> result, int maxAttempts) {
List<CheckpointAndXid> reached = null;
for (CheckpointAndXid x : result.getForRetry()) {
if (x.attempts >= maxAttempts) {
if (reached == null) {
reached = new ArrayList<>();
}
reached.add(x);
}
}
if (reached != null) {
throw new RuntimeException(
String.format(
"reached max number of commit attempts (%d) for transactions: %s",
maxAttempts, reached));
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import java.io.Serializable;
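/** Loads {@link JdbcXaSinkFunctionState} on initialization and stores it on each checkpoint. */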
@PublicEvolving
interface XaSinkStateHandler extends Serializable {
JdbcXaSinkFunctionState load(FunctionInitializationContext context) throws Exception;
void store(JdbcXaSinkFunctionState state) throws Exception;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import javax.transaction.xa.Xid;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@Internal
class XaSinkStateHandlerImpl implements XaSinkStateHandler {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(XaSinkStateHandlerImpl.class);
private final TypeSerializer<JdbcXaSinkFunctionState> serializer;
// state could be stored as two separate lists
// on one hand this would allow more even distribution on re-scale
// on the other it would lead to more IO calls and less data locality
private transient ListState<JdbcXaSinkFunctionState> states;
XaSinkStateHandlerImpl() {
this(new XaSinkStateSerializer());
}
XaSinkStateHandlerImpl(TypeSerializer<JdbcXaSinkFunctionState> serializer) {
this.serializer = serializer;
}
@Override
public JdbcXaSinkFunctionState load(FunctionInitializationContext context) throws Exception {
states = getListState(context, serializer, "XaSinkState");
return context.isRestored() ? merge(states.get()) : JdbcXaSinkFunctionState.empty();
}
@Override
public void store(JdbcXaSinkFunctionState state) throws Exception {
if (LOG.isDebugEnabled()) {
LOG.debug("store state snapshot: {}", state);
}
states.update(Collections.singletonList(state));
}
private <T> ListState<T> getListState(
FunctionInitializationContext context, TypeSerializer<T> serializer, String name) {
try {
return context.getOperatorStateStore()
.getListState(new ListStateDescriptor<>(name, serializer));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private JdbcXaSinkFunctionState merge(@Nullable Iterable<JdbcXaSinkFunctionState> states) {
if (states == null) {
return JdbcXaSinkFunctionState.empty();
}
List<Xid> hanging = new ArrayList<>();
List<CheckpointAndXid> prepared = new ArrayList<>();
states.forEach(
i -> {
hanging.addAll(i.getHanging());
prepared.addAll(i.getPrepared());
});
return JdbcXaSinkFunctionState.of(prepared, hanging);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.typeutils.SimpleTypeSerializerSnapshot;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.TypeSerializerSnapshot;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataOutputView;
import javax.transaction.xa.Xid;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/** {@link TypeSerializer} for {@link JdbcXaSinkFunctionState}. */
@Internal
public final class XaSinkStateSerializer extends TypeSerializer<JdbcXaSinkFunctionState> {
private static final TypeSerializerSnapshot<JdbcXaSinkFunctionState> SNAPSHOT =
new XaSinkStateSimpleXaTypeSerializerSnapshot();
private final TypeSerializer<Xid> xidSerializer;
private final TypeSerializer<CheckpointAndXid> checkpointAndXidSerializer;
public XaSinkStateSerializer() {
this(new XidSerializer(), new CheckpointAndXidSerializer());
}
private XaSinkStateSerializer(
TypeSerializer<Xid> xidSerializer,
TypeSerializer<CheckpointAndXid> checkpointAndXidSerializer) {
this.xidSerializer = xidSerializer;
this.checkpointAndXidSerializer = checkpointAndXidSerializer;
}
@Override
public boolean isImmutableType() {
return true;
}
@Override
public TypeSerializer<JdbcXaSinkFunctionState> duplicate() {
return this;
}
@Override
public JdbcXaSinkFunctionState createInstance() {
return JdbcXaSinkFunctionState.empty();
}
@Override
public JdbcXaSinkFunctionState copy(JdbcXaSinkFunctionState from) {
return from;
}
@Override
public JdbcXaSinkFunctionState copy(
JdbcXaSinkFunctionState from, JdbcXaSinkFunctionState reuse) {
return from;
}
@Override
public int getLength() {
return -1;
}
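// Serialized layout: hanging count followed by each hanging Xid, then prepared count followed
// by each prepared CheckpointAndXid.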
@Override
public void serialize(JdbcXaSinkFunctionState state, DataOutputView target) throws IOException {
target.writeInt(state.getHanging().size());
for (Xid h : state.getHanging()) {
xidSerializer.serialize(h, target);
}
target.writeInt(state.getPrepared().size());
for (CheckpointAndXid checkpointAndXid : state.getPrepared()) {
checkpointAndXidSerializer.serialize(checkpointAndXid, target);
}
}
@Override
public JdbcXaSinkFunctionState deserialize(DataInputView source) throws IOException {
int hangingSize = source.readInt();
List<Xid> hanging = new ArrayList<>(hangingSize);
for (int i = 0; i < hangingSize; i++) {
hanging.add(xidSerializer.deserialize(source));
}
int preparedSize = source.readInt();
List<CheckpointAndXid> prepared = new ArrayList<>(preparedSize);
for (int i = 0; i < preparedSize; i++) {
prepared.add(checkpointAndXidSerializer.deserialize(source));
}
return JdbcXaSinkFunctionState.of(prepared, hanging);
}
@Override
public JdbcXaSinkFunctionState deserialize(JdbcXaSinkFunctionState reuse, DataInputView source)
throws IOException {
return deserialize(source);
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
serialize(deserialize(source), target);
}
@Override
public boolean equals(Object obj) {
return obj instanceof XaSinkStateSerializer;
}
@Override
public int hashCode() {
return 0;
}
@Override
public TypeSerializerSnapshot<JdbcXaSinkFunctionState> snapshotConfiguration() {
return SNAPSHOT;
}
/** Simple {@link TypeSerializerSnapshot} for {@link XaSinkStateSerializer}. */
public static class XaSinkStateSimpleXaTypeSerializerSnapshot
extends SimpleTypeSerializerSnapshot<JdbcXaSinkFunctionState> {
private static final int VERSION = 1;
public XaSinkStateSimpleXaTypeSerializerSnapshot() {
super(XaSinkStateSerializer::new);
}
@Override
public void writeSnapshot(DataOutputView out) throws IOException {
super.writeSnapshot(out);
out.writeInt(VERSION);
}
@Override
public void readSnapshot(int readVersion, DataInputView in, ClassLoader classLoader)
throws IOException {
super.readSnapshot(readVersion, in, classLoader);
in.readInt();
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.functions.RuntimeContext;
import javax.transaction.xa.Xid;
import java.io.Serializable;
import java.security.SecureRandom;
/** {@link Xid} generator. */
@Internal
public interface XidGenerator extends Serializable, AutoCloseable {
/**
* Generate new {@link Xid}. Requirements for generated Xids:
*
* <ul>
* <li>Global Transaction Id MUST be unique across the Flink job, and probably across Xids
* generated by other jobs and applications, depending on the usage of this class
* <li>SHOULD be immutable
* <li>SHOULD override {@link Object#hashCode hashCode} and {@link Object#equals equals}
* </ul>
*
* @param runtimeContext can be used for example to derive global transaction id
* @param checkpointId can be used for example to derive global transaction id
*/
Xid generateXid(RuntimeContext runtimeContext, long checkpointId);
default void open() {}
/** @return true if the provided transaction belongs to this subtask */
boolean belongsToSubtask(Xid xid, RuntimeContext ctx);
@Override
default void close() {}
/**
* Creates a {@link XidGenerator} that generates {@link Xid xids} from:
*
* <ol>
* <li>job id
* <li>subtask index
* <li>checkpoint id
* <li>four random bytes (generated using {@link SecureRandom})
* </ol>
*
* <p>Each created {@link XidGenerator} instance MUST be used for only one Sink instance
* (otherwise Xids could collide).
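*
* <p>Illustrative sketch ({@code runtimeContext} and {@code checkpointId} are assumed to come
* from the surrounding sink function and are not defined here):
*
* <pre>{@code
* XidGenerator generator = XidGenerator.semanticXidGenerator();
* generator.open();
* Xid xid = generator.generateXid(runtimeContext, checkpointId);
* }</pre>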
*/
static XidGenerator semanticXidGenerator() {
return new SemanticXidGenerator();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.util.Preconditions;
import javax.annotation.Nonnull;
import javax.transaction.xa.Xid;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Objects;
import static org.apache.flink.util.StringUtils.byteToHexString;
/**
* A simple {@link Xid} implementation that stores branch and global transaction identifiers as byte
* arrays.
*/
@Internal
final class XidImpl implements Xid, Serializable {
private static final long serialVersionUID = 1L;
private final int formatId;
@Nonnull private final byte[] globalTransactionId;
@Nonnull private final byte[] branchQualifier;
XidImpl(int formatId, byte[] globalTransactionId, byte[] branchQualifier) {
Preconditions.checkArgument(globalTransactionId.length <= Xid.MAXGTRIDSIZE);
Preconditions.checkArgument(branchQualifier.length <= Xid.MAXBQUALSIZE);
this.formatId = formatId;
this.globalTransactionId = Arrays.copyOf(globalTransactionId, globalTransactionId.length);
this.branchQualifier = Arrays.copyOf(branchQualifier, branchQualifier.length);
}
@Override
public int getFormatId() {
return formatId;
}
@Override
public byte[] getGlobalTransactionId() {
return globalTransactionId;
}
@Override
public byte[] getBranchQualifier() {
return branchQualifier;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof XidImpl)) {
return false;
}
XidImpl xid = (XidImpl) o;
return formatId == xid.formatId
&& Arrays.equals(globalTransactionId, xid.globalTransactionId)
&& Arrays.equals(branchQualifier, xid.branchQualifier);
}
@Override
public int hashCode() {
int result = Objects.hash(formatId);
result = 31 * result + Arrays.hashCode(globalTransactionId);
result = 31 * result + Arrays.hashCode(branchQualifier);
return result;
}
@Override
public String toString() {
return formatId
+ ":"
+ byteToHexString(globalTransactionId)
+ ":"
+ byteToHexString(branchQualifier);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.phoenix.xa;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.typeutils.SimpleTypeSerializerSnapshot;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.TypeSerializerSnapshot;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataOutputView;
import javax.transaction.xa.Xid;
import java.io.IOException;
/** {@link Xid} serializer. */
@Internal
public final class XidSerializer extends TypeSerializer<Xid> {
private static final long serialVersionUID = 1L;
private static final TypeSerializerSnapshot<Xid> SNAPSHOT =
new XidSimpleTypeSerializerSnapshot();
@Override
public boolean isImmutableType() {
return true;
}
@Override
public TypeSerializer<Xid> duplicate() {
return this;
}
@Override
public Xid createInstance() {
return new XidImpl(0, new byte[0], new byte[0]);
}
@Override
public Xid copy(Xid from) {
return from;
}
@Override
public Xid copy(Xid from, Xid reuse) {
return from;
}
@Override
public int getLength() {
return -1;
}
@Override
public void serialize(Xid xid, DataOutputView target) throws IOException {
target.writeInt(xid.getFormatId());
writeBytesWithSize(target, xid.getGlobalTransactionId());
writeBytesWithSize(target, xid.getBranchQualifier());
}
@Override
public Xid deserialize(DataInputView source) throws IOException {
return new XidImpl(source.readInt(), readBytesWithSize(source), readBytesWithSize(source));
}
private void writeBytesWithSize(DataOutputView target, byte[] bytes) throws IOException {
target.writeByte(bytes.length);
target.write(bytes, 0, bytes.length);
}
private byte[] readBytesWithSize(DataInputView source) throws IOException {
byte len = source.readByte();
byte[] bytes = new byte[len];
source.read(bytes, 0, len);
return bytes;
}
@Override
public Xid deserialize(Xid reuse, DataInputView source) throws IOException {
return deserialize(source);
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
serialize(deserialize(source), target);
}
@Override
public boolean equals(Object obj) {
return obj instanceof XidSerializer;
}
@Override
public int hashCode() {
return XidSerializer.class.hashCode();
}
@Override
public TypeSerializerSnapshot<Xid> snapshotConfiguration() {
return SNAPSHOT;
}
/** Simple {@link TypeSerializerSnapshot} for {@link XidSerializer}. */
public static class XidSimpleTypeSerializerSnapshot extends SimpleTypeSerializerSnapshot<Xid> {
private static final int VERSION = 1;
public XidSimpleTypeSerializerSnapshot() {
super(XidSerializer::new);
}
@Override
public void writeSnapshot(DataOutputView out) throws IOException {
super.writeSnapshot(out);
out.writeInt(VERSION);
}
@Override
public void readSnapshot(int readVersion, DataInputView in, ClassLoader classLoader)
throws IOException {
super.readSnapshot(readVersion, in, classLoader);
in.readInt();
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides an exactly-once JDBC sink implementation using the Java XA transactions API (JTA).
*
* @see org.apache.flink.connector.phoenix.xa.JdbcXaSinkFunction JdbcXaSinkFunction
*/
package org.apache.flink.connector.phoenix.xa;
......@@ -481,7 +481,7 @@ public class JobManager {
try {
return FlinkAPI.build(config.getAddress()).stop(jobId);
} catch (Exception e) {
logger.info("停止作业时集群不存在");
logger.error("停止作业时集群不存在: " + e);
}
return false;
}
......