public class MySqlTableSource extends Object implements org.apache.flink.table.connector.source.ScanTableSource, org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata
DynamicTableSource that describes how to create a MySQL binlog source from a logical description.

| Modifier and Type | Field and Description |
|---|---|
protected List<String> |
metadataKeys
Metadata that is appended at the end of a physical source row.
|
protected org.apache.flink.table.types.DataType |
producedDataType
Data type that describes the final output of the source.
|
| Constructor and Description |
|---|
MySqlTableSource(org.apache.flink.table.catalog.ResolvedSchema physicalSchema,
int port,
String hostname,
String database,
String tableName,
String username,
String password,
java.time.ZoneId serverTimeZone,
Properties dbzProperties,
String serverId,
boolean enableParallelRead,
int splitSize,
int splitMetaGroupSize,
int fetchSize,
java.time.Duration connectTimeout,
int connectMaxRetries,
int connectionPoolSize,
double distributionFactorUpper,
double distributionFactorLower,
StartupOptions startupOptions,
boolean scanNewlyAddedTableEnabled,
Properties jdbcProperties,
java.time.Duration heartbeatInterval) |
MySqlTableSource(org.apache.flink.table.catalog.ResolvedSchema physicalSchema,
int port,
String hostname,
String database,
String tableName,
String username,
String password,
java.time.ZoneId serverTimeZone,
Properties dbzProperties,
String serverId,
boolean enableParallelRead,
int splitSize,
int splitMetaGroupSize,
int fetchSize,
java.time.Duration connectTimeout,
int connectMaxRetries,
int connectionPoolSize,
double distributionFactorUpper,
double distributionFactorLower,
StartupOptions startupOptions,
java.time.Duration heartbeatInterval) |
| Modifier and Type | Method and Description |
|---|---|
void |
applyReadableMetadata(List<String> metadataKeys,
org.apache.flink.table.types.DataType producedDataType) |
String |
asSummaryString() |
org.apache.flink.table.connector.source.DynamicTableSource |
copy() |
boolean |
equals(Object o) |
org.apache.flink.table.connector.ChangelogMode |
getChangelogMode() |
protected com.ververica.cdc.debezium.table.MetadataConverter[] |
getMetadataConverters() |
org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider |
getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext) |
int |
hashCode() |
Map<String,org.apache.flink.table.types.DataType> |
listReadableMetadata() |
protected org.apache.flink.table.types.DataType producedDataType
public MySqlTableSource(org.apache.flink.table.catalog.ResolvedSchema physicalSchema,
int port,
String hostname,
String database,
String tableName,
String username,
String password,
java.time.ZoneId serverTimeZone,
Properties dbzProperties,
@Nullable
String serverId,
boolean enableParallelRead,
int splitSize,
int splitMetaGroupSize,
int fetchSize,
java.time.Duration connectTimeout,
int connectMaxRetries,
int connectionPoolSize,
double distributionFactorUpper,
double distributionFactorLower,
StartupOptions startupOptions,
java.time.Duration heartbeatInterval)
public MySqlTableSource(org.apache.flink.table.catalog.ResolvedSchema physicalSchema,
int port,
String hostname,
String database,
String tableName,
String username,
String password,
java.time.ZoneId serverTimeZone,
Properties dbzProperties,
@Nullable
String serverId,
boolean enableParallelRead,
int splitSize,
int splitMetaGroupSize,
int fetchSize,
java.time.Duration connectTimeout,
int connectMaxRetries,
int connectionPoolSize,
double distributionFactorUpper,
double distributionFactorLower,
StartupOptions startupOptions,
boolean scanNewlyAddedTableEnabled,
Properties jdbcProperties,
java.time.Duration heartbeatInterval)
public org.apache.flink.table.connector.ChangelogMode getChangelogMode()
getChangelogMode in interface org.apache.flink.table.connector.source.ScanTableSource

public org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext)
getScanRuntimeProvider in interface org.apache.flink.table.connector.source.ScanTableSource

protected com.ververica.cdc.debezium.table.MetadataConverter[] getMetadataConverters()
public Map<String,org.apache.flink.table.types.DataType> listReadableMetadata()
listReadableMetadata in interface org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata

public void applyReadableMetadata(List<String> metadataKeys, org.apache.flink.table.types.DataType producedDataType)
applyReadableMetadata in interface org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata

public org.apache.flink.table.connector.source.DynamicTableSource copy()
copy in interface org.apache.flink.table.connector.source.DynamicTableSource

public String asSummaryString()
asSummaryString in interface org.apache.flink.table.connector.source.DynamicTableSource

Copyright © 2022 The Apache Software Foundation. All rights reserved.