diff --git a/javadoc/allclasses-frame.html b/javadoc/allclasses-frame.html index 2e1f8acf3..362d1ae41 100644 --- a/javadoc/allclasses-frame.html +++ b/javadoc/allclasses-frame.html @@ -18,6 +18,11 @@

All Classes

  • Actions
  • ActionsProvider
  • AddedRowsScanTask
  • +
  • ADLSFileIO
  • +
  • AesGcmInputFile
  • +
  • AesGcmInputStream
  • +
  • AesGcmOutputFile
  • +
  • AesGcmOutputStream
  • Aggregate
  • AggregateEvaluator
  • AliyunClientFactories
  • @@ -62,22 +67,19 @@

    All Classes

  • AvroWithSparkSchemaVisitor
  • AwsClientFactories
  • AwsClientFactory
  • +
  • AwsClientProperties
  • AwsProperties
  • +
  • AzureProperties
  • BadRequestException
  • BaseBatchReader
  • BaseColumnIterator
  • BaseCombinedScanTask
  • -
  • BaseDeleteOrphanFilesActionResult
  • -
  • BaseDeleteReachableFilesActionResult
  • -
  • BaseExpireSnapshotsActionResult
  • -
  • BaseFileGroupRewriteResult
  • BaseFileScanTask
  • BaseFileWriterFactory
  • BaseMetadataTable
  • BaseMetastoreCatalog
  • BaseMetastoreTableOperations
  • BaseMetastoreTableOperations.CommitStatus
  • -
  • BaseMigrateTableActionResult
  • BaseOverwriteFiles
  • BasePageIterator
  • BasePageIterator.IntIterator
  • @@ -87,13 +89,9 @@

    All Classes

  • BaseReplacePartitions
  • BaseReplaceSortOrder
  • BaseRewriteDataFilesAction
  • -
  • BaseRewriteDataFilesFileGroupInfo
  • -
  • BaseRewriteDataFilesResult
  • BaseRewriteManifests
  • -
  • BaseRewriteManifestsActionResult
  • BaseScanTaskGroup
  • BaseSessionCatalog
  • -
  • BaseSnapshotTableActionResult
  • BaseTable
  • BaseTaskWriter
  • BaseTransaction
  • @@ -152,6 +150,7 @@

    All Classes

  • Ciphers
  • Ciphers.AesGcmDecryptor
  • Ciphers.AesGcmEncryptor
  • +
  • CleanableFailure
  • ClientPool
  • ClientPool.Action
  • ClientPoolImpl
  • @@ -177,7 +176,10 @@

    All Classes

  • CommitReport
  • CommitReportParser
  • CommitStateUnknownException
  • +
  • CommitTransactionRequest
  • +
  • CommitTransactionRequestParser
  • Comparators
  • +
  • ComputeUpdateIterator
  • ConfigProperties
  • ConfigResponse
  • ConfigResponse.Builder
  • @@ -217,6 +219,7 @@

    All Classes

  • DataIteratorReaderFunction
  • DataOperations
  • DataReader
  • +
  • DataStatisticsCoordinatorProvider
  • DataTableScan
  • DataTask
  • DataTaskReader
  • @@ -227,13 +230,16 @@

    All Classes

  • Days
  • DaysFunction
  • DaysFunction.DateToDaysFunction
  • +
  • DaysFunction.TimestampNtzToDaysFunction
  • DaysFunction.TimestampToDaysFunction
  • DecimalUtil
  • DecimalVectorUtil
  • DecoderResolver
  • DefaultCounter
  • DefaultMetricsContext
  • +
  • DefaultSplitAssigner
  • DefaultTimer
  • +
  • DelegateFileIO
  • DelegatingInputStream
  • DelegatingOutputStream
  • DeleteCounter
  • @@ -259,13 +265,9 @@

    All Classes

  • DellClientFactories
  • DellClientFactory
  • DellProperties
  • -
  • DeltaBatchWrite
  • DeltaLakeToIcebergMigrationActionsProvider
  • DeltaLakeToIcebergMigrationActionsProvider.DefaultDeltaLakeToIcebergMigrationActions
  • -
  • DeltaWrite
  • -
  • DeltaWriteBuilder
  • -
  • DeltaWriter
  • -
  • DeltaWriterFactory
  • +
  • DictEncodedArrowConverter
  • DistributionMode
  • DoubleFieldMetrics
  • DoubleFieldMetrics.Builder
  • @@ -329,11 +331,12 @@

    All Classes

  • ExpressionVisitors.BoundVisitor
  • ExpressionVisitors.CustomOrderExpressionVisitor
  • ExpressionVisitors.ExpressionVisitor
  • -
  • ExtendedLogicalWriteInfo
  • ExtendedParser
  • ExtendedParser.RawOrderField
  • False
  • FanoutDataWriter
  • +
  • FanoutPositionOnlyDeleteWriter
  • +
  • FastForwardBranchProcedure
  • FieldMetrics
  • FileAppender
  • FileAppenderFactory
  • @@ -349,10 +352,11 @@

    All Classes

  • FileMetadata.Builder
  • FileMetadataParser
  • FileRewriteCoordinator
  • +
  • FileRewriter
  • Files
  • FileScanTask
  • +
  • FileScanTaskParser
  • FileScanTaskReader
  • -
  • FileScanTaskSetManager
  • FilesTable
  • FilesTable.FilesTableScan
  • FileWriter
  • @@ -363,6 +367,7 @@

    All Classes

  • FindFiles.Builder
  • FixedReservoirHistogram
  • FixupTypes
  • +
  • FlinkAlterTableUtil
  • FlinkAppenderFactory
  • FlinkAvroReader
  • FlinkAvroWriter
  • @@ -386,6 +391,7 @@

    All Classes

  • FlinkSink.Builder
  • FlinkSource
  • FlinkSource.Builder
  • +
  • FlinkSourceFilter
  • FlinkSplitPlanner
  • FlinkTypeVisitor
  • FlinkValueReaders
  • @@ -403,6 +409,8 @@

    All Classes

  • GenericArrowVectorAccessorFactory.DecimalFactory
  • GenericArrowVectorAccessorFactory.StringFactory
  • GenericArrowVectorAccessorFactory.StructChildFactory
  • +
  • GenericAvroReader
  • +
  • GenericAvroWriter
  • GenericBlobMetadata
  • GenericDeleteFilter
  • GenericManifestFile
  • @@ -455,9 +463,11 @@

    All Classes

  • HiveVersion
  • Hours
  • HoursFunction
  • +
  • HoursFunction.TimestampNtzToHoursFunction
  • HoursFunction.TimestampToHoursFunction
  • HTTPClient
  • HTTPClient.Builder
  • +
  • HttpClientProperties
  • IcebergArrowColumnVector
  • IcebergBinaryObjectInspector
  • IcebergBuild
  • @@ -571,6 +581,11 @@

    All Classes

  • IncrementalScanEvent
  • IndexByName
  • IndexParents
  • +
  • InMemoryCatalog
  • +
  • InMemoryFileIO
  • +
  • InMemoryInputFile
  • +
  • InMemoryMetricsReporter
  • +
  • InMemoryOutputFile
  • InputFile
  • InputFilesDecryptor
  • InputFormatConfig
  • @@ -638,12 +653,14 @@

    All Classes

  • MetadataUpdate.AddSchema
  • MetadataUpdate.AddSnapshot
  • MetadataUpdate.AddSortOrder
  • +
  • MetadataUpdate.AddViewVersion
  • MetadataUpdate.AssignUUID
  • MetadataUpdate.RemoveProperties
  • MetadataUpdate.RemoveSnapshot
  • MetadataUpdate.RemoveSnapshotRef
  • MetadataUpdate.RemoveStatistics
  • MetadataUpdate.SetCurrentSchema
  • +
  • MetadataUpdate.SetCurrentViewVersion
  • MetadataUpdate.SetDefaultPartitionSpec
  • MetadataUpdate.SetDefaultSortOrder
  • MetadataUpdate.SetLocation
  • @@ -667,6 +684,7 @@

    All Classes

  • MetricsModes.Truncate
  • MetricsReport
  • MetricsReporter
  • +
  • MetricsReporters
  • MetricsUtil
  • MetricsUtil.ReadableColMetricsStruct
  • MetricsUtil.ReadableMetricColDefinition
  • @@ -683,6 +701,7 @@

    All Classes

  • Months
  • MonthsFunction
  • MonthsFunction.DateToMonthsFunction
  • +
  • MonthsFunction.TimestampNtzToMonthsFunction
  • MonthsFunction.TimestampToMonthsFunction
  • NamedReference
  • NameMapping
  • @@ -697,6 +716,7 @@

    All Classes

  • NessieIcebergClient
  • NessieTableOperations
  • NessieUtil
  • +
  • NoLock
  • NoSuchIcebergTableException
  • NoSuchNamespaceException
  • NoSuchProcedureException
  • @@ -734,6 +754,7 @@

    All Classes

  • OrcValueReaders
  • OrcValueReaders.StructReader
  • OrcValueWriter
  • +
  • OrderedSplitAssignerFactory
  • OSSFileIO
  • OSSOutputStream
  • OSSURI
  • @@ -810,8 +831,10 @@

    All Classes

  • PendingUpdate
  • PigParquetReader
  • PlaintextEncryptionManager
  • +
  • PlanningMode
  • PositionDelete
  • PositionDeleteIndex
  • +
  • PositionDeletesRewriteCoordinator
  • PositionDeletesScanTask
  • PositionDeletesTable
  • PositionDeletesTable.PositionDeletesBatchScan
  • @@ -836,19 +859,24 @@

    All Classes

  • PuffinReader
  • PuffinWriter
  • RangeReadable
  • +
  • RawDecoder
  • ReachableFileUtil
  • ReaderFunction
  • Record
  • RecordAndPosition
  • Reference
  • RefsTable
  • +
  • RegisterTableRequest
  • +
  • RegisterTableRequestParser
  • RemoveIds
  • RemoveIds
  • +
  • RemoveNetCarryoverIterator
  • RemoveOrphanFilesProcedure
  • RenameTableRequest
  • RenameTableRequest.Builder
  • ReplacePartitions
  • ReplaceSortOrder
  • +
  • ReplaceViewVersion
  • ReportMetricsRequest
  • ReportMetricsRequest.ReportType
  • ReportMetricsRequestParser
  • @@ -862,6 +890,8 @@

    All Classes

  • RESTRequest
  • RESTResponse
  • RESTSerializers
  • +
  • RESTSerializers.CommitTransactionRequestDeserializer
  • +
  • RESTSerializers.CommitTransactionRequestSerializer
  • RESTSerializers.ErrorResponseDeserializer
  • RESTSerializers.ErrorResponseSerializer
  • RESTSerializers.MetadataUpdateDeserializer
  • @@ -870,6 +900,8 @@

    All Classes

  • RESTSerializers.NamespaceSerializer
  • RESTSerializers.OAuthTokenResponseDeserializer
  • RESTSerializers.OAuthTokenResponseSerializer
  • +
  • RESTSerializers.RegisterTableRequestDeserializer
  • +
  • RESTSerializers.RegisterTableRequestSerializer
  • RESTSerializers.ReportMetricsRequestDeserializer
  • RESTSerializers.ReportMetricsRequestSerializer
  • RESTSerializers.SchemaDeserializer
  • @@ -884,10 +916,14 @@

    All Classes

  • RESTSerializers.UnboundSortOrderSerializer
  • RESTSerializers.UpdateRequirementDeserializer
  • RESTSerializers.UpdateRequirementSerializer
  • +
  • RESTSerializers.UpdateTableRequestDeserializer
  • +
  • RESTSerializers.UpdateTableRequestSerializer
  • RESTSessionCatalog
  • RESTSigV4Signer
  • RESTUtil
  • +
  • RetryDetector
  • RewriteDataFiles
  • +
  • RewriteDataFiles.FileGroupFailureResult
  • RewriteDataFiles.FileGroupInfo
  • RewriteDataFiles.FileGroupRewriteResult
  • RewriteDataFiles.Result
  • @@ -903,12 +939,18 @@

    All Classes

  • RewriteManifests.Result
  • RewriteManifestsSparkAction
  • RewritePositionDeleteFiles
  • +
  • RewritePositionDeleteFiles.FileGroupInfo
  • +
  • RewritePositionDeleteFiles.FileGroupRewriteResult
  • RewritePositionDeleteFiles.Result
  • -
  • RewritePositionDeleteStrategy
  • +
  • RewritePositionDeleteFilesProcedure
  • +
  • RewritePositionDeleteFilesSparkAction
  • +
  • RewritePositionDeletesCommitManager
  • +
  • RewritePositionDeletesGroup
  • RewriteStrategy
  • RollbackStagedTable
  • RollingDataWriter
  • RollingEqualityDeleteWriter
  • +
  • RollingManifestWriter
  • RollingPositionDeleteWriter
  • RowDataFileScanTaskReader
  • RowDataProjection
  • @@ -925,6 +967,9 @@

    All Classes

  • RuntimeIOException
  • RuntimeMetaException
  • S3FileIO
  • +
  • S3FileIOAwsClientFactories
  • +
  • S3FileIOAwsClientFactory
  • +
  • S3FileIOProperties
  • S3InputFile
  • S3ObjectMapper
  • S3ObjectMapper.S3SignRequestDeserializer
  • @@ -944,6 +989,9 @@

    All Classes

  • ScanEvent
  • ScanMetrics
  • ScanMetricsResult
  • +
  • ScanMetricsUtil
  • +
  • ScannedDataFiles
  • +
  • ScannedDataManifests
  • ScanReport
  • ScanReportParser
  • ScanSummary
  • @@ -958,6 +1006,7 @@

    All Classes

  • SchemaWithPartnerVisitor
  • SchemaWithPartnerVisitor.PartnerAccessors
  • SeekableInputStream
  • +
  • SerializableComparator
  • SerializableConfiguration
  • SerializableFunction
  • SerializableMap
  • @@ -974,9 +1023,13 @@

    All Classes

  • SetAccumulator
  • SetLocation
  • SetStatistics
  • -
  • SimpleSplitAssigner
  • SimpleSplitAssignerFactory
  • SingleValueParser
  • +
  • SizeBasedDataRewriter
  • +
  • SizeBasedFileRewriter
  • +
  • SizeBasedPositionDeletesRewriter
  • +
  • SkippedDataFiles
  • +
  • SkippedDataManifests
  • Snapshot
  • SnapshotDeltaLakeTable
  • SnapshotDeltaLakeTable.Result
  • @@ -1001,6 +1054,7 @@

    All Classes

  • SortDirection
  • SortedMerge
  • SortField
  • +
  • SortingPositionOnlyDeleteWriter
  • SortOrder
  • SortOrder.Builder
  • SortOrderBuilder
  • @@ -1015,14 +1069,14 @@

    All Classes

  • SparkAggregates
  • SparkAvroReader
  • SparkAvroWriter
  • -
  • SparkBinPackStrategy
  • SparkCachedTableCatalog
  • SparkCatalog
  • SparkChangelogTable
  • SparkDataFile
  • -
  • SparkDistributionAndOrderingUtil
  • +
  • SparkDistributedDataScan
  • SparkExceptionUtil
  • SparkFilters
  • +
  • SparkFunctionCatalog
  • SparkFunctions
  • SparkMetadataColumn
  • SparkMicroBatchStream
  • @@ -1033,6 +1087,9 @@

    All Classes

  • SparkParquetWriters
  • SparkPartitionedFanoutWriter
  • SparkPartitionedWriter
  • +
  • SparkPositionDeletesRewrite
  • +
  • SparkPositionDeletesRewrite.DeleteTaskCommit
  • +
  • SparkPositionDeletesRewriteBuilder
  • SparkProcedures
  • SparkProcedures.ProcedureBuilder
  • SparkReadConf
  • @@ -1040,7 +1097,6 @@

    All Classes

  • SparkScanBuilder
  • SparkSchemaUtil
  • SparkSessionCatalog
  • -
  • SparkSortStrategy
  • SparkSQLProperties
  • SparkStructLike
  • SparkTable
  • @@ -1054,10 +1110,12 @@

    All Classes

  • SparkValueWriters
  • SparkWriteConf
  • SparkWriteOptions
  • -
  • SparkZOrderStrategy
  • +
  • SparkWriteRequirements
  • +
  • SparkWriteUtil
  • SplitAssigner
  • SplitAssignerFactory
  • SplitAssignerType
  • +
  • SplitComparators
  • SplitRequestEvent
  • SplittableScanTask
  • SQLViewRepresentation
  • @@ -1080,12 +1138,14 @@

    All Classes

  • StructProjection
  • StructRowData
  • SupportsBulkOperations
  • -
  • SupportsDelta
  • SupportsNamespaces
  • SupportsPrefixOperations
  • SupportsRowPosition
  • +
  • SystemConfigs
  • +
  • SystemConfigs.ConfigEntry
  • SystemProperties
  • Table
  • +
  • TableCommit
  • TableIdentifier
  • TableIdentifierParser
  • TableLoader
  • @@ -1110,6 +1170,12 @@

    All Classes

  • Tasks.FailureTask
  • Tasks.Task
  • Tasks.UnrecoverableException
  • +
  • TaskScannedDataFiles
  • +
  • TaskScannedDataManifests
  • +
  • TaskSkippedDataFiles
  • +
  • TaskSkippedDataManifests
  • +
  • TaskTotalFileSize
  • +
  • TaskTotalPlanningDuration
  • TaskWriter
  • TaskWriterFactory
  • Term
  • @@ -1118,6 +1184,8 @@

    All Classes

  • Timer
  • Timer.Timed
  • TimerResult
  • +
  • TotalFileSize
  • +
  • TotalPlanningDuration
  • Transaction
  • Transactions
  • Transform
  • @@ -1184,7 +1252,18 @@

    All Classes

  • UpdateNamespacePropertiesResponse.Builder
  • UpdatePartitionSpec
  • UpdateProperties
  • +
  • UpdateRequirement
  • +
  • UpdateRequirement.AssertCurrentSchemaID
  • +
  • UpdateRequirement.AssertDefaultSortOrderID
  • +
  • UpdateRequirement.AssertDefaultSpecID
  • +
  • UpdateRequirement.AssertLastAssignedFieldId
  • +
  • UpdateRequirement.AssertLastAssignedPartitionId
  • +
  • UpdateRequirement.AssertRefSnapshotID
  • +
  • UpdateRequirement.AssertTableDoesNotExist
  • +
  • UpdateRequirement.AssertTableUUID
  • UpdateRequirementParser
  • +
  • UpdateRequirementParser
  • +
  • UpdateRequirements
  • UpdateSchema
  • UpdateStatistics
  • UpdateTableRequest
  • @@ -1198,6 +1277,7 @@

    All Classes

  • UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID
  • UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist
  • UpdateTableRequest.UpdateRequirement.AssertTableUUID
  • +
  • UpdateTableRequestParser
  • UpdateViewProperties
  • Util
  • UUIDConversion
  • @@ -1230,19 +1310,26 @@

    All Classes

  • VectorizedSupport
  • VectorizedSupport.Support
  • VectorizedTableScanIterable
  • +
  • VersionBuilder
  • View
  • ViewBuilder
  • ViewCatalog
  • ViewHistoryEntry
  • +
  • ViewMetadata
  • +
  • ViewMetadata.Builder
  • +
  • ViewMetadataParser
  • +
  • ViewProperties
  • ViewRepresentation
  • ViewRepresentation.Type
  • ViewVersion
  • +
  • ViewVersionParser
  • WapUtil
  • WriteObjectInspector
  • WriteResult
  • WriteResult.Builder
  • YearsFunction
  • YearsFunction.DateToYearsFunction
  • +
  • YearsFunction.TimestampNtzToYearsFunction
  • YearsFunction.TimestampToYearsFunction
  • Zorder
  • ZOrderByteUtils
  • diff --git a/javadoc/allclasses-noframe.html b/javadoc/allclasses-noframe.html index 3f986a4fd..19cbf5a6f 100644 --- a/javadoc/allclasses-noframe.html +++ b/javadoc/allclasses-noframe.html @@ -18,6 +18,11 @@

    All Classes

  • Actions
  • ActionsProvider
  • AddedRowsScanTask
  • +
  • ADLSFileIO
  • +
  • AesGcmInputFile
  • +
  • AesGcmInputStream
  • +
  • AesGcmOutputFile
  • +
  • AesGcmOutputStream
  • Aggregate
  • AggregateEvaluator
  • AliyunClientFactories
  • @@ -62,22 +67,19 @@

    All Classes

  • AvroWithSparkSchemaVisitor
  • AwsClientFactories
  • AwsClientFactory
  • +
  • AwsClientProperties
  • AwsProperties
  • +
  • AzureProperties
  • BadRequestException
  • BaseBatchReader
  • BaseColumnIterator
  • BaseCombinedScanTask
  • -
  • BaseDeleteOrphanFilesActionResult
  • -
  • BaseDeleteReachableFilesActionResult
  • -
  • BaseExpireSnapshotsActionResult
  • -
  • BaseFileGroupRewriteResult
  • BaseFileScanTask
  • BaseFileWriterFactory
  • BaseMetadataTable
  • BaseMetastoreCatalog
  • BaseMetastoreTableOperations
  • BaseMetastoreTableOperations.CommitStatus
  • -
  • BaseMigrateTableActionResult
  • BaseOverwriteFiles
  • BasePageIterator
  • BasePageIterator.IntIterator
  • @@ -87,13 +89,9 @@

    All Classes

  • BaseReplacePartitions
  • BaseReplaceSortOrder
  • BaseRewriteDataFilesAction
  • -
  • BaseRewriteDataFilesFileGroupInfo
  • -
  • BaseRewriteDataFilesResult
  • BaseRewriteManifests
  • -
  • BaseRewriteManifestsActionResult
  • BaseScanTaskGroup
  • BaseSessionCatalog
  • -
  • BaseSnapshotTableActionResult
  • BaseTable
  • BaseTaskWriter
  • BaseTransaction
  • @@ -152,6 +150,7 @@

    All Classes

  • Ciphers
  • Ciphers.AesGcmDecryptor
  • Ciphers.AesGcmEncryptor
  • +
  • CleanableFailure
  • ClientPool
  • ClientPool.Action
  • ClientPoolImpl
  • @@ -177,7 +176,10 @@

    All Classes

  • CommitReport
  • CommitReportParser
  • CommitStateUnknownException
  • +
  • CommitTransactionRequest
  • +
  • CommitTransactionRequestParser
  • Comparators
  • +
  • ComputeUpdateIterator
  • ConfigProperties
  • ConfigResponse
  • ConfigResponse.Builder
  • @@ -217,6 +219,7 @@

    All Classes

  • DataIteratorReaderFunction
  • DataOperations
  • DataReader
  • +
  • DataStatisticsCoordinatorProvider
  • DataTableScan
  • DataTask
  • DataTaskReader
  • @@ -227,13 +230,16 @@

    All Classes

  • Days
  • DaysFunction
  • DaysFunction.DateToDaysFunction
  • +
  • DaysFunction.TimestampNtzToDaysFunction
  • DaysFunction.TimestampToDaysFunction
  • DecimalUtil
  • DecimalVectorUtil
  • DecoderResolver
  • DefaultCounter
  • DefaultMetricsContext
  • +
  • DefaultSplitAssigner
  • DefaultTimer
  • +
  • DelegateFileIO
  • DelegatingInputStream
  • DelegatingOutputStream
  • DeleteCounter
  • @@ -259,13 +265,9 @@

    All Classes

  • DellClientFactories
  • DellClientFactory
  • DellProperties
  • -
  • DeltaBatchWrite
  • DeltaLakeToIcebergMigrationActionsProvider
  • DeltaLakeToIcebergMigrationActionsProvider.DefaultDeltaLakeToIcebergMigrationActions
  • -
  • DeltaWrite
  • -
  • DeltaWriteBuilder
  • -
  • DeltaWriter
  • -
  • DeltaWriterFactory
  • +
  • DictEncodedArrowConverter
  • DistributionMode
  • DoubleFieldMetrics
  • DoubleFieldMetrics.Builder
  • @@ -329,11 +331,12 @@

    All Classes

  • ExpressionVisitors.BoundVisitor
  • ExpressionVisitors.CustomOrderExpressionVisitor
  • ExpressionVisitors.ExpressionVisitor
  • -
  • ExtendedLogicalWriteInfo
  • ExtendedParser
  • ExtendedParser.RawOrderField
  • False
  • FanoutDataWriter
  • +
  • FanoutPositionOnlyDeleteWriter
  • +
  • FastForwardBranchProcedure
  • FieldMetrics
  • FileAppender
  • FileAppenderFactory
  • @@ -349,10 +352,11 @@

    All Classes

  • FileMetadata.Builder
  • FileMetadataParser
  • FileRewriteCoordinator
  • +
  • FileRewriter
  • Files
  • FileScanTask
  • +
  • FileScanTaskParser
  • FileScanTaskReader
  • -
  • FileScanTaskSetManager
  • FilesTable
  • FilesTable.FilesTableScan
  • FileWriter
  • @@ -363,6 +367,7 @@

    All Classes

  • FindFiles.Builder
  • FixedReservoirHistogram
  • FixupTypes
  • +
  • FlinkAlterTableUtil
  • FlinkAppenderFactory
  • FlinkAvroReader
  • FlinkAvroWriter
  • @@ -386,6 +391,7 @@

    All Classes

  • FlinkSink.Builder
  • FlinkSource
  • FlinkSource.Builder
  • +
  • FlinkSourceFilter
  • FlinkSplitPlanner
  • FlinkTypeVisitor
  • FlinkValueReaders
  • @@ -403,6 +409,8 @@

    All Classes

  • GenericArrowVectorAccessorFactory.DecimalFactory
  • GenericArrowVectorAccessorFactory.StringFactory
  • GenericArrowVectorAccessorFactory.StructChildFactory
  • +
  • GenericAvroReader
  • +
  • GenericAvroWriter
  • GenericBlobMetadata
  • GenericDeleteFilter
  • GenericManifestFile
  • @@ -455,9 +463,11 @@

    All Classes

  • HiveVersion
  • Hours
  • HoursFunction
  • +
  • HoursFunction.TimestampNtzToHoursFunction
  • HoursFunction.TimestampToHoursFunction
  • HTTPClient
  • HTTPClient.Builder
  • +
  • HttpClientProperties
  • IcebergArrowColumnVector
  • IcebergBinaryObjectInspector
  • IcebergBuild
  • @@ -571,6 +581,11 @@

    All Classes

  • IncrementalScanEvent
  • IndexByName
  • IndexParents
  • +
  • InMemoryCatalog
  • +
  • InMemoryFileIO
  • +
  • InMemoryInputFile
  • +
  • InMemoryMetricsReporter
  • +
  • InMemoryOutputFile
  • InputFile
  • InputFilesDecryptor
  • InputFormatConfig
  • @@ -638,12 +653,14 @@

    All Classes

  • MetadataUpdate.AddSchema
  • MetadataUpdate.AddSnapshot
  • MetadataUpdate.AddSortOrder
  • +
  • MetadataUpdate.AddViewVersion
  • MetadataUpdate.AssignUUID
  • MetadataUpdate.RemoveProperties
  • MetadataUpdate.RemoveSnapshot
  • MetadataUpdate.RemoveSnapshotRef
  • MetadataUpdate.RemoveStatistics
  • MetadataUpdate.SetCurrentSchema
  • +
  • MetadataUpdate.SetCurrentViewVersion
  • MetadataUpdate.SetDefaultPartitionSpec
  • MetadataUpdate.SetDefaultSortOrder
  • MetadataUpdate.SetLocation
  • @@ -667,6 +684,7 @@

    All Classes

  • MetricsModes.Truncate
  • MetricsReport
  • MetricsReporter
  • +
  • MetricsReporters
  • MetricsUtil
  • MetricsUtil.ReadableColMetricsStruct
  • MetricsUtil.ReadableMetricColDefinition
  • @@ -683,6 +701,7 @@

    All Classes

  • Months
  • MonthsFunction
  • MonthsFunction.DateToMonthsFunction
  • +
  • MonthsFunction.TimestampNtzToMonthsFunction
  • MonthsFunction.TimestampToMonthsFunction
  • NamedReference
  • NameMapping
  • @@ -697,6 +716,7 @@

    All Classes

  • NessieIcebergClient
  • NessieTableOperations
  • NessieUtil
  • +
  • NoLock
  • NoSuchIcebergTableException
  • NoSuchNamespaceException
  • NoSuchProcedureException
  • @@ -734,6 +754,7 @@

    All Classes

  • OrcValueReaders
  • OrcValueReaders.StructReader
  • OrcValueWriter
  • +
  • OrderedSplitAssignerFactory
  • OSSFileIO
  • OSSOutputStream
  • OSSURI
  • @@ -810,8 +831,10 @@

    All Classes

  • PendingUpdate
  • PigParquetReader
  • PlaintextEncryptionManager
  • +
  • PlanningMode
  • PositionDelete
  • PositionDeleteIndex
  • +
  • PositionDeletesRewriteCoordinator
  • PositionDeletesScanTask
  • PositionDeletesTable
  • PositionDeletesTable.PositionDeletesBatchScan
  • @@ -836,19 +859,24 @@

    All Classes

  • PuffinReader
  • PuffinWriter
  • RangeReadable
  • +
  • RawDecoder
  • ReachableFileUtil
  • ReaderFunction
  • Record
  • RecordAndPosition
  • Reference
  • RefsTable
  • +
  • RegisterTableRequest
  • +
  • RegisterTableRequestParser
  • RemoveIds
  • RemoveIds
  • +
  • RemoveNetCarryoverIterator
  • RemoveOrphanFilesProcedure
  • RenameTableRequest
  • RenameTableRequest.Builder
  • ReplacePartitions
  • ReplaceSortOrder
  • +
  • ReplaceViewVersion
  • ReportMetricsRequest
  • ReportMetricsRequest.ReportType
  • ReportMetricsRequestParser
  • @@ -862,6 +890,8 @@

    All Classes

  • RESTRequest
  • RESTResponse
  • RESTSerializers
  • +
  • RESTSerializers.CommitTransactionRequestDeserializer
  • +
  • RESTSerializers.CommitTransactionRequestSerializer
  • RESTSerializers.ErrorResponseDeserializer
  • RESTSerializers.ErrorResponseSerializer
  • RESTSerializers.MetadataUpdateDeserializer
  • @@ -870,6 +900,8 @@

    All Classes

  • RESTSerializers.NamespaceSerializer
  • RESTSerializers.OAuthTokenResponseDeserializer
  • RESTSerializers.OAuthTokenResponseSerializer
  • +
  • RESTSerializers.RegisterTableRequestDeserializer
  • +
  • RESTSerializers.RegisterTableRequestSerializer
  • RESTSerializers.ReportMetricsRequestDeserializer
  • RESTSerializers.ReportMetricsRequestSerializer
  • RESTSerializers.SchemaDeserializer
  • @@ -884,10 +916,14 @@

    All Classes

  • RESTSerializers.UnboundSortOrderSerializer
  • RESTSerializers.UpdateRequirementDeserializer
  • RESTSerializers.UpdateRequirementSerializer
  • +
  • RESTSerializers.UpdateTableRequestDeserializer
  • +
  • RESTSerializers.UpdateTableRequestSerializer
  • RESTSessionCatalog
  • RESTSigV4Signer
  • RESTUtil
  • +
  • RetryDetector
  • RewriteDataFiles
  • +
  • RewriteDataFiles.FileGroupFailureResult
  • RewriteDataFiles.FileGroupInfo
  • RewriteDataFiles.FileGroupRewriteResult
  • RewriteDataFiles.Result
  • @@ -903,12 +939,18 @@

    All Classes

  • RewriteManifests.Result
  • RewriteManifestsSparkAction
  • RewritePositionDeleteFiles
  • +
  • RewritePositionDeleteFiles.FileGroupInfo
  • +
  • RewritePositionDeleteFiles.FileGroupRewriteResult
  • RewritePositionDeleteFiles.Result
  • -
  • RewritePositionDeleteStrategy
  • +
  • RewritePositionDeleteFilesProcedure
  • +
  • RewritePositionDeleteFilesSparkAction
  • +
  • RewritePositionDeletesCommitManager
  • +
  • RewritePositionDeletesGroup
  • RewriteStrategy
  • RollbackStagedTable
  • RollingDataWriter
  • RollingEqualityDeleteWriter
  • +
  • RollingManifestWriter
  • RollingPositionDeleteWriter
  • RowDataFileScanTaskReader
  • RowDataProjection
  • @@ -925,6 +967,9 @@

    All Classes

  • RuntimeIOException
  • RuntimeMetaException
  • S3FileIO
  • +
  • S3FileIOAwsClientFactories
  • +
  • S3FileIOAwsClientFactory
  • +
  • S3FileIOProperties
  • S3InputFile
  • S3ObjectMapper
  • S3ObjectMapper.S3SignRequestDeserializer
  • @@ -944,6 +989,9 @@

    All Classes

  • ScanEvent
  • ScanMetrics
  • ScanMetricsResult
  • +
  • ScanMetricsUtil
  • +
  • ScannedDataFiles
  • +
  • ScannedDataManifests
  • ScanReport
  • ScanReportParser
  • ScanSummary
  • @@ -958,6 +1006,7 @@

    All Classes

  • SchemaWithPartnerVisitor
  • SchemaWithPartnerVisitor.PartnerAccessors
  • SeekableInputStream
  • +
  • SerializableComparator
  • SerializableConfiguration
  • SerializableFunction
  • SerializableMap
  • @@ -974,9 +1023,13 @@

    All Classes

  • SetAccumulator
  • SetLocation
  • SetStatistics
  • -
  • SimpleSplitAssigner
  • SimpleSplitAssignerFactory
  • SingleValueParser
  • +
  • SizeBasedDataRewriter
  • +
  • SizeBasedFileRewriter
  • +
  • SizeBasedPositionDeletesRewriter
  • +
  • SkippedDataFiles
  • +
  • SkippedDataManifests
  • Snapshot
  • SnapshotDeltaLakeTable
  • SnapshotDeltaLakeTable.Result
  • @@ -1001,6 +1054,7 @@

    All Classes

  • SortDirection
  • SortedMerge
  • SortField
  • +
  • SortingPositionOnlyDeleteWriter
  • SortOrder
  • SortOrder.Builder
  • SortOrderBuilder
  • @@ -1015,14 +1069,14 @@

    All Classes

  • SparkAggregates
  • SparkAvroReader
  • SparkAvroWriter
  • -
  • SparkBinPackStrategy
  • SparkCachedTableCatalog
  • SparkCatalog
  • SparkChangelogTable
  • SparkDataFile
  • -
  • SparkDistributionAndOrderingUtil
  • +
  • SparkDistributedDataScan
  • SparkExceptionUtil
  • SparkFilters
  • +
  • SparkFunctionCatalog
  • SparkFunctions
  • SparkMetadataColumn
  • SparkMicroBatchStream
  • @@ -1033,6 +1087,9 @@

    All Classes

  • SparkParquetWriters
  • SparkPartitionedFanoutWriter
  • SparkPartitionedWriter
  • +
  • SparkPositionDeletesRewrite
  • +
  • SparkPositionDeletesRewrite.DeleteTaskCommit
  • +
  • SparkPositionDeletesRewriteBuilder
  • SparkProcedures
  • SparkProcedures.ProcedureBuilder
  • SparkReadConf
  • @@ -1040,7 +1097,6 @@

    All Classes

  • SparkScanBuilder
  • SparkSchemaUtil
  • SparkSessionCatalog
  • -
  • SparkSortStrategy
  • SparkSQLProperties
  • SparkStructLike
  • SparkTable
  • @@ -1054,10 +1110,12 @@

    All Classes

  • SparkValueWriters
  • SparkWriteConf
  • SparkWriteOptions
  • -
  • SparkZOrderStrategy
  • +
  • SparkWriteRequirements
  • +
  • SparkWriteUtil
  • SplitAssigner
  • SplitAssignerFactory
  • SplitAssignerType
  • +
  • SplitComparators
  • SplitRequestEvent
  • SplittableScanTask
  • SQLViewRepresentation
  • @@ -1080,12 +1138,14 @@

    All Classes

  • StructProjection
  • StructRowData
  • SupportsBulkOperations
  • -
  • SupportsDelta
  • SupportsNamespaces
  • SupportsPrefixOperations
  • SupportsRowPosition
  • +
  • SystemConfigs
  • +
  • SystemConfigs.ConfigEntry
  • SystemProperties
  • Table
  • +
  • TableCommit
  • TableIdentifier
  • TableIdentifierParser
  • TableLoader
  • @@ -1110,6 +1170,12 @@

    All Classes

  • Tasks.FailureTask
  • Tasks.Task
  • Tasks.UnrecoverableException
  • +
  • TaskScannedDataFiles
  • +
  • TaskScannedDataManifests
  • +
  • TaskSkippedDataFiles
  • +
  • TaskSkippedDataManifests
  • +
  • TaskTotalFileSize
  • +
  • TaskTotalPlanningDuration
  • TaskWriter
  • TaskWriterFactory
  • Term
  • @@ -1118,6 +1184,8 @@

    All Classes

  • Timer
  • Timer.Timed
  • TimerResult
  • +
  • TotalFileSize
  • +
  • TotalPlanningDuration
  • Transaction
  • Transactions
  • Transform
  • @@ -1184,7 +1252,18 @@

    All Classes

  • UpdateNamespacePropertiesResponse.Builder
  • UpdatePartitionSpec
  • UpdateProperties
  • +
  • UpdateRequirement
  • +
  • UpdateRequirement.AssertCurrentSchemaID
  • +
  • UpdateRequirement.AssertDefaultSortOrderID
  • +
  • UpdateRequirement.AssertDefaultSpecID
  • +
  • UpdateRequirement.AssertLastAssignedFieldId
  • +
  • UpdateRequirement.AssertLastAssignedPartitionId
  • +
  • UpdateRequirement.AssertRefSnapshotID
  • +
  • UpdateRequirement.AssertTableDoesNotExist
  • +
  • UpdateRequirement.AssertTableUUID
  • UpdateRequirementParser
  • +
  • UpdateRequirementParser
  • +
  • UpdateRequirements
  • UpdateSchema
  • UpdateStatistics
  • UpdateTableRequest
  • @@ -1198,6 +1277,7 @@

    All Classes

  • UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID
  • UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist
  • UpdateTableRequest.UpdateRequirement.AssertTableUUID
  • +
  • UpdateTableRequestParser
  • UpdateViewProperties
  • Util
  • UUIDConversion
  • @@ -1230,19 +1310,26 @@

    All Classes

  • VectorizedSupport
  • VectorizedSupport.Support
  • VectorizedTableScanIterable
  • +
  • VersionBuilder
  • View
  • ViewBuilder
  • ViewCatalog
  • ViewHistoryEntry
  • +
  • ViewMetadata
  • +
  • ViewMetadata.Builder
  • +
  • ViewMetadataParser
  • +
  • ViewProperties
  • ViewRepresentation
  • ViewRepresentation.Type
  • ViewVersion
  • +
  • ViewVersionParser
  • WapUtil
  • WriteObjectInspector
  • WriteResult
  • WriteResult.Builder
  • YearsFunction
  • YearsFunction.DateToYearsFunction
  • +
  • YearsFunction.TimestampNtzToYearsFunction
  • YearsFunction.TimestampToYearsFunction
  • Zorder
  • ZOrderByteUtils
  • diff --git a/javadoc/constant-values.html b/javadoc/constant-values.html index 86abc0ab5..f18a58d6c 100644 --- a/javadoc/constant-values.html +++ b/javadoc/constant-values.html @@ -136,25 +136,25 @@

    org.apache.*

    "app-id" - + public static final java.lang.String -AUTH_DEFAULT_REFRESH_ENABLED -"auth.default-refresh-enabled" +AUTH_SESSION_TIMEOUT_MS +"auth.session-timeout-ms" - + -public static final boolean -AUTH_DEFAULT_REFRESH_ENABLED_DEFAULT -false +public static final java.lang.String +CACHE_CASE_SENSITIVE +"cache.case-sensitive" - + -public static final java.lang.String -AUTH_SESSION_TIMEOUT_MS -"auth.session-timeout-ms" +public static final boolean +CACHE_CASE_SENSITIVE_DEFAULT +true @@ -199,167 +199,174 @@

    org.apache.*

    "client.pool.cache.eviction-interval-ms" +
    + +public static final java.lang.String +CLIENT_POOL_CACHE_KEYS +"client-pool-cache-keys" + + public static final java.lang.String CLIENT_POOL_SIZE "clients" - + public static final int CLIENT_POOL_SIZE_DEFAULT 2 - + public static final java.lang.String FILE_IO_IMPL "io-impl" - + public static final java.lang.String IO_MANIFEST_CACHE_ENABLED "io.manifest.cache-enabled" - + public static final boolean IO_MANIFEST_CACHE_ENABLED_DEFAULT false - + public static final java.lang.String IO_MANIFEST_CACHE_EXPIRATION_INTERVAL_MS "io.manifest.cache.expiration-interval-ms" - + public static final java.lang.String IO_MANIFEST_CACHE_MAX_CONTENT_LENGTH "io.manifest.cache.max-content-length" - + public static final long IO_MANIFEST_CACHE_MAX_CONTENT_LENGTH_DEFAULT 8388608L - + public static final java.lang.String IO_MANIFEST_CACHE_MAX_TOTAL_BYTES "io.manifest.cache.max-total-bytes" - + public static final long IO_MANIFEST_CACHE_MAX_TOTAL_BYTES_DEFAULT 104857600L - + public static final java.lang.String LOCK_ACQUIRE_INTERVAL_MS "lock.acquire-interval-ms" - + public static final java.lang.String LOCK_ACQUIRE_TIMEOUT_MS "lock.acquire-timeout-ms" - + public static final java.lang.String LOCK_HEARTBEAT_INTERVAL_MS "lock.heartbeat-interval-ms" - + public static final java.lang.String LOCK_HEARTBEAT_THREADS "lock.heartbeat-threads" - + public static final int LOCK_HEARTBEAT_THREADS_DEFAULT 4 - + public static final java.lang.String LOCK_HEARTBEAT_TIMEOUT_MS "lock.heartbeat-timeout-ms" - + public static final java.lang.String LOCK_IMPL "lock-impl" - + public static final java.lang.String LOCK_TABLE "lock.table" - + public static final java.lang.String METRICS_REPORTER_IMPL "metrics-reporter-impl" - + public static final java.lang.String TABLE_DEFAULT_PREFIX "table-default." - + public static final java.lang.String TABLE_OVERRIDE_PREFIX "table-override." - + public static final java.lang.String URI "uri" - + public static final java.lang.String USER "user" - + public static final java.lang.String @@ -632,6 +639,39 @@
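The catalog property keys above ("clients", "io-impl", the "io.manifest.cache.*" settings, "uri", and so on) are plain string constants from CatalogProperties and are normally supplied as a configuration map when a catalog is constructed. Below is a minimal sketch, assuming the chosen catalog implementation and a Hadoop Configuration are on the classpath; the catalog name, metastore URI, and FileIO class are placeholders, not values taken from this diff:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.catalog.Catalog;

public class CatalogConfigExample {
  public static void main(String[] args) {
    // Keys correspond to the CatalogProperties constants listed above.
    Map<String, String> props = new HashMap<>();
    props.put(CatalogProperties.URI, "thrift://metastore:9083");             // "uri" (placeholder endpoint)
    props.put(CatalogProperties.CLIENT_POOL_SIZE, "4");                      // "clients"
    props.put(CatalogProperties.IO_MANIFEST_CACHE_ENABLED, "true");          // "io.manifest.cache-enabled"
    props.put(CatalogProperties.FILE_IO_IMPL,
        "org.apache.iceberg.hadoop.HadoopFileIO");                           // "io-impl"

    // Load a catalog implementation by class name with the property map.
    Catalog catalog = CatalogUtil.loadCatalog(
        "org.apache.iceberg.hive.HiveCatalog", "demo", props, new Configuration());
    System.out.println(catalog.name());
  }
}
```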

    org.apache.*

  • + + + + + + + + + + + + + + + + + + + + + + + +
    org.apache.iceberg.PositionDeletesTable 
Modifier and Type  Constant Field  Value
+public static final java.lang.String DELETE_FILE_PATH "delete_file_path"
+public static final java.lang.String PARTITION "partition"
+public static final java.lang.String SPEC_ID "spec_id"
    +
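DELETE_FILE_PATH, PARTITION, and SPEC_ID name columns that appear in the position_deletes metadata table. As a rough illustration of where those names show up, the sketch below reads that metadata table through Spark; it assumes a Spark session with an Iceberg catalog already configured, and the catalog, namespace, and table identifiers are placeholders:

```java
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class PositionDeletesExample {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
        .appName("position-deletes-example")
        .getOrCreate();

    // Read the position_deletes metadata table of a (placeholder) Iceberg table.
    Dataset<Row> deletes = spark.read().table("demo.db.events.position_deletes");

    // Project the columns named by the constants above.
    deletes.select("delete_file_path", "partition", "spec_id").show(10, false);
  }
}
```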
  • +
  • + @@ -928,6 +968,20 @@

    org.apache.*

    + + + + + + + + + + @@ -1089,194 +1143,208 @@

    org.apache.*

    + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + - - - + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -2197,7 +2272,7 @@

    org.apache.*

    public static final int - +
    org.apache.iceberg.SnapshotRef 
    Modifier and Type
+public static final java.lang.String ADAPTIVE_SPLIT_SIZE_ENABLED "read.split.adaptive-size.enabled"
+public static final boolean ADAPTIVE_SPLIT_SIZE_ENABLED_DEFAULT true
    public static final java.lang.String "current-snapshot-timestamp-ms"
+public static final java.lang.String DATA_PLANNING_MODE "read.data-planning-mode"
    public static final java.lang.String DEFAULT_FILE_FORMAT "write.format.default"
    public static final java.lang.String DEFAULT_FILE_FORMAT_DEFAULT "parquet"
    public static final java.lang.String DEFAULT_NAME_MAPPING "schema.name-mapping.default"
    public static final java.lang.String DEFAULT_PARTITION_SPEC "default-partition-spec"
    public static final java.lang.String DEFAULT_SORT_ORDER "default-sort-order"
    public static final java.lang.String DEFAULT_WRITE_METRICS_MODE "write.metadata.metrics.default"
    public static final java.lang.String DEFAULT_WRITE_METRICS_MODE_DEFAULT "truncate(16)"
    public static final java.lang.String DELETE_AVRO_COMPRESSION "write.delete.avro.compression-codec"
    public static final java.lang.String DELETE_AVRO_COMPRESSION_LEVEL "write.delete.avro.compression-level"
    public static final java.lang.String DELETE_DEFAULT_FILE_FORMAT "write.delete.format.default"
    public static final java.lang.String DELETE_DISTRIBUTION_MODE "write.delete.distribution-mode"
    public static final java.lang.String DELETE_ISOLATION_LEVEL "write.delete.isolation-level"
    public static final java.lang.String DELETE_ISOLATION_LEVEL_DEFAULT "serializable"
    public static final java.lang.String DELETE_MODE "write.delete.mode"
    public static final java.lang.String DELETE_ORC_BLOCK_SIZE_BYTES "write.delete.orc.block-size-bytes"
    public static final java.lang.String DELETE_ORC_COMPRESSION "write.delete.orc.compression-codec"
    public static final java.lang.String DELETE_ORC_COMPRESSION_STRATEGY "write.delete.orc.compression-strategy"
    public static final java.lang.String DELETE_ORC_STRIPE_SIZE_BYTES "write.delete.orc.stripe-size-bytes"
    public static final java.lang.String DELETE_ORC_WRITE_BATCH_SIZE "write.delete.orc.vectorized.batch-size"
    public static final java.lang.String DELETE_PARQUET_COMPRESSION "write.delete.parquet.compression-codec"
    public static final java.lang.String DELETE_PARQUET_COMPRESSION_LEVEL "write.delete.parquet.compression-level"
    public static final java.lang.String DELETE_PARQUET_DICT_SIZE_BYTES "write.delete.parquet.dict-size-bytes"
    public static final java.lang.String DELETE_PARQUET_PAGE_ROW_LIMIT "write.delete.parquet.page-row-limit"
    public static final java.lang.String DELETE_PARQUET_PAGE_SIZE_BYTES "write.delete.parquet.page-size-bytes"
    public static final java.lang.String DELETE_PARQUET_ROW_GROUP_CHECK_MAX_RECORD_COUNT "write.delete.parquet.row-group-check-max-record-count"
    public static final java.lang.String DELETE_PARQUET_ROW_GROUP_CHECK_MIN_RECORD_COUNT "write.delete.parquet.row-group-check-min-record-count"
    public static final java.lang.String DELETE_PARQUET_ROW_GROUP_SIZE_BYTES "write.delete.parquet.row-group-size-bytes"
+public static final java.lang.String DELETE_PLANNING_MODE "read.delete-planning-mode"
    @@ -1327,425 +1395,425 @@

    org.apache.*

    true
    + public static final java.lang.StringHMS_TABLE_OWNER"hive.metastore.table.owner"HIVE_LOCK_ENABLED"engine.hive.lock-enabled"
    + +public static final booleanHIVE_LOCK_ENABLED_DEFAULTtrue
    public static final java.lang.String MANIFEST_LISTS_ENABLED "write.manifest-lists.enabled"
    public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT true
    public static final java.lang.String MANIFEST_MERGE_ENABLED "commit.manifest-merge.enabled"
    public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT true
    public static final java.lang.String MANIFEST_MIN_MERGE_COUNT "commit.manifest.min-count-to-merge"
    public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT 100
    public static final java.lang.String MANIFEST_TARGET_SIZE_BYTES "commit.manifest.target-size-bytes"
    public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT 8388608L
    public static final java.lang.String MAX_REF_AGE_MS "history.expire.max-ref-age-ms"
    public static final long MAX_REF_AGE_MS_DEFAULT 9223372036854775807L
    public static final java.lang.String MAX_SNAPSHOT_AGE_MS "history.expire.max-snapshot-age-ms"
    public static final long MAX_SNAPSHOT_AGE_MS_DEFAULT 432000000L
-public static final java.lang.String MERGE_CARDINALITY_CHECK_ENABLED "write.merge.cardinality-check.enabled"
-public static final boolean MERGE_CARDINALITY_CHECK_ENABLED_DEFAULT true
    public static final java.lang.String MERGE_DISTRIBUTION_MODE "write.merge.distribution-mode"
    public static final java.lang.String MERGE_ISOLATION_LEVEL "write.merge.isolation-level"
    public static final java.lang.String MERGE_ISOLATION_LEVEL_DEFAULT "serializable"
    public static final java.lang.String MERGE_MODE "write.merge.mode"
    public static final java.lang.String METADATA_COMPRESSION "write.metadata.compression-codec"
    public static final java.lang.String METADATA_COMPRESSION_DEFAULT "none"
    public static final java.lang.String METADATA_DELETE_AFTER_COMMIT_ENABLED "write.metadata.delete-after-commit.enabled"
    public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT false
    public static final java.lang.String METADATA_PREVIOUS_VERSIONS_MAX "write.metadata.previous-versions-max"
    public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT 100
    public static final java.lang.String METADATA_SPLIT_SIZE "read.split.metadata-target-size"
    public static final long METADATA_SPLIT_SIZE_DEFAULT 33554432L
    public static final java.lang.String METRICS_MAX_INFERRED_COLUMN_DEFAULTS "write.metadata.metrics.max-inferred-column-defaults"
    public static final int METRICS_MAX_INFERRED_COLUMN_DEFAULTS_DEFAULT 100
    public static final java.lang.String METRICS_MODE_COLUMN_CONF_PREFIX "write.metadata.metrics.column."
    public static final java.lang.String MIN_SNAPSHOTS_TO_KEEP "history.expire.min-snapshots-to-keep"
    public static final int MIN_SNAPSHOTS_TO_KEEP_DEFAULT 1
    public static final java.lang.String OBJECT_STORE_ENABLED "write.object-storage.enabled"
    public static final boolean OBJECT_STORE_ENABLED_DEFAULT false
    public static final java.lang.String OBJECT_STORE_PATH "write.object-storage.path"
    public static final java.lang.String ORC_BATCH_SIZE "read.orc.vectorization.batch-size"
    public static final int ORC_BATCH_SIZE_DEFAULT 5000
    public static final java.lang.String ORC_BLOCK_SIZE_BYTES "write.orc.block-size-bytes"
    public static final long ORC_BLOCK_SIZE_BYTES_DEFAULT 268435456L
    public static final java.lang.String ORC_BLOOM_FILTER_COLUMNS "write.orc.bloom.filter.columns"
    public static final java.lang.String ORC_BLOOM_FILTER_COLUMNS_DEFAULT ""
    public static final java.lang.String ORC_BLOOM_FILTER_FPP "write.orc.bloom.filter.fpp"
    public static final double ORC_BLOOM_FILTER_FPP_DEFAULT 0.05
    public static final java.lang.String ORC_COMPRESSION "write.orc.compression-codec"
    public static final java.lang.String ORC_COMPRESSION_DEFAULT "zlib"
    public static final java.lang.String ORC_COMPRESSION_STRATEGY "write.orc.compression-strategy"
    public static final java.lang.String ORC_COMPRESSION_STRATEGY_DEFAULT "speed"
    public static final java.lang.String ORC_STRIPE_SIZE_BYTES "write.orc.stripe-size-bytes"
    public static final long ORC_STRIPE_SIZE_BYTES_DEFAULT 67108864L
    public static final java.lang.String ORC_VECTORIZATION_ENABLED "read.orc.vectorization.enabled"
    public static final boolean ORC_VECTORIZATION_ENABLED_DEFAULT false
    public static final java.lang.String ORC_WRITE_BATCH_SIZE "write.orc.vectorized.batch-size"
    public static final int ORC_WRITE_BATCH_SIZE_DEFAULT 1024
    public static final java.lang.String PARQUET_BATCH_SIZE "read.parquet.vectorization.batch-size"
    public static final int PARQUET_BATCH_SIZE_DEFAULT 5000
    public static final java.lang.String PARQUET_BLOOM_FILTER_COLUMN_ENABLED_PREFIX "write.parquet.bloom-filter-enabled.column."
    public static final java.lang.String PARQUET_BLOOM_FILTER_MAX_BYTES "write.parquet.bloom-filter-max-bytes"
    public static final int PARQUET_BLOOM_FILTER_MAX_BYTES_DEFAULT 1048576
    public static final java.lang.String PARQUET_COMPRESSION "write.parquet.compression-codec"
    public static final java.lang.String PARQUET_COMPRESSION_DEFAULT "gzip"
+public static final java.lang.String PARQUET_COMPRESSION_DEFAULT_SINCE_1_4_0 "zstd"
    @@ -1887,202 +1955,209 @@

    org.apache.*

    false
+public static final java.lang.String SPARK_WRITE_ADVISORY_PARTITION_SIZE_BYTES "write.spark.advisory-partition-size-bytes"
    public static final java.lang.String SPARK_WRITE_PARTITIONED_FANOUT_ENABLED "write.spark.fanout.enabled"
    public static final boolean SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT false
    public static final java.lang.String SPLIT_LOOKBACK "read.split.planning-lookback"
    public static final int SPLIT_LOOKBACK_DEFAULT 10
    public static final java.lang.String SPLIT_OPEN_FILE_COST "read.split.open-file-cost"
    public static final long SPLIT_OPEN_FILE_COST_DEFAULT 4194304L
    public static final java.lang.String SPLIT_SIZE "read.split.target-size"
    public static final long SPLIT_SIZE_DEFAULT 134217728L
    public static final java.lang.String UPDATE_DISTRIBUTION_MODE "write.update.distribution-mode"
    public static final java.lang.String UPDATE_ISOLATION_LEVEL "write.update.isolation-level"
    public static final java.lang.String UPDATE_ISOLATION_LEVEL_DEFAULT "serializable"
    public static final java.lang.String UPDATE_MODE "write.update.mode"
    public static final java.lang.String UPSERT_ENABLED "write.upsert.enabled"
    public static final boolean UPSERT_ENABLED_DEFAULT false
    public static final java.lang.String UUID "uuid"
    public static final java.lang.String WRITE_AUDIT_PUBLISH_ENABLED "write.wap.enabled"
    public static final java.lang.String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT "false"
    public static final java.lang.String WRITE_DATA_LOCATION "write.data.path"
    public static final java.lang.String WRITE_DISTRIBUTION_MODE "write.distribution-mode"
    public static final java.lang.String WRITE_DISTRIBUTION_MODE_HASH "hash"
    public static final java.lang.String WRITE_DISTRIBUTION_MODE_NONE "none"
    public static final java.lang.String WRITE_DISTRIBUTION_MODE_RANGE "range"
    public static final java.lang.String WRITE_FOLDER_STORAGE_LOCATION "write.folder-storage.path"
    public static final java.lang.String WRITE_LOCATION_PROVIDER_IMPL "write.location-provider.impl"
    public static final java.lang.String WRITE_METADATA_LOCATION "write.metadata.path"
    public static final java.lang.String WRITE_PARTITION_SUMMARY_LIMIT "write.summary.partition-limit"
    public static final int WRITE_PARTITION_SUMMARY_LIMIT_DEFAULT 0
    public static final java.lang.String WRITE_TARGET_FILE_SIZE_BYTES "write.target-file-size-bytes"
    public static final long MAX_CONCURRENT_FILE_GROUP_REWRITES_DEFAULT15
    @@ -2272,6 +2347,182 @@

    org.apache.*

  • +
  • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    org.apache.iceberg.actions.RewritePositionDeleteFiles 
Modifier and Type  Constant Field  Value
+public static final java.lang.String MAX_CONCURRENT_FILE_GROUP_REWRITES "max-concurrent-file-group-rewrites"
+public static final int MAX_CONCURRENT_FILE_GROUP_REWRITES_DEFAULT 5
+public static final java.lang.String PARTIAL_PROGRESS_ENABLED "partial-progress.enabled"
+public static final boolean PARTIAL_PROGRESS_ENABLED_DEFAULT false
+public static final java.lang.String PARTIAL_PROGRESS_MAX_COMMITS "partial-progress.max-commits"
+public static final int PARTIAL_PROGRESS_MAX_COMMITS_DEFAULT 10
+public static final java.lang.String REWRITE_JOB_ORDER "rewrite-job-order"
    +
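The RewritePositionDeleteFiles constants above are option keys rather than behavior switches in themselves; they are typically passed as string options to the corresponding action. A minimal sketch, assuming the Spark actions module is on the classpath and that rewritePositionDeletes is available on SparkActions in this release; the table identifier and option values are placeholders:

```java
import org.apache.iceberg.Table;
import org.apache.iceberg.actions.RewritePositionDeleteFiles;
import org.apache.iceberg.spark.Spark3Util;
import org.apache.iceberg.spark.actions.SparkActions;
import org.apache.spark.sql.SparkSession;

public class RewritePositionDeletesExample {
  public static void main(String[] args) throws Exception {
    SparkSession spark = SparkSession.builder().appName("rewrite-position-deletes").getOrCreate();
    Table table = Spark3Util.loadIcebergTable(spark, "demo.db.events"); // placeholder identifier

    RewritePositionDeleteFiles.Result result =
        SparkActions.get(spark)
            .rewritePositionDeletes(table)
            .option("partial-progress.enabled", "true")          // PARTIAL_PROGRESS_ENABLED
            .option("max-concurrent-file-group-rewrites", "5")   // MAX_CONCURRENT_FILE_GROUP_REWRITES
            .option("rewrite-job-order", "bytes-desc")           // REWRITE_JOB_ORDER (assumed value)
            .execute();

    System.out.println(result);
  }
}
```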
  • +
  • + + + + + + + + + + + + + + + + + + + +
    org.apache.iceberg.actions.SizeBasedDataRewriter 
Modifier and Type  Constant Field  Value
+public static final java.lang.String DELETE_FILE_THRESHOLD "delete-file-threshold"
+public static final int DELETE_FILE_THRESHOLD_DEFAULT 2147483647
    +
  • +
  • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    org.apache.iceberg.actions.SizeBasedFileRewriter<T extends ContentScanTask<F>,F extends ContentFile<F>> 
Modifier and Type  Constant Field  Value
+public static final java.lang.String MAX_FILE_GROUP_SIZE_BYTES "max-file-group-size-bytes"
+public static final long MAX_FILE_GROUP_SIZE_BYTES_DEFAULT 107374182400L
+public static final java.lang.String MAX_FILE_SIZE_BYTES "max-file-size-bytes"
+public static final double MAX_FILE_SIZE_DEFAULT_RATIO 1.8
+public static final java.lang.String MIN_FILE_SIZE_BYTES "min-file-size-bytes"
+public static final double MIN_FILE_SIZE_DEFAULT_RATIO 0.75
+public static final java.lang.String MIN_INPUT_FILES "min-input-files"
+public static final int MIN_INPUT_FILES_DEFAULT 5
+public static final java.lang.String REWRITE_ALL "rewrite-all"
+public static final boolean REWRITE_ALL_DEFAULT false
+public static final java.lang.String TARGET_FILE_SIZE_BYTES "target-file-size-bytes"
    +
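SizeBasedDataRewriter and SizeBasedFileRewriter expose their size thresholds through the same kind of string option keys. A minimal sketch of how such keys are commonly passed to a data-file compaction action, again assuming the Spark actions API is available and using placeholder identifiers and values:

```java
import org.apache.iceberg.Table;
import org.apache.iceberg.actions.RewriteDataFiles;
import org.apache.iceberg.spark.Spark3Util;
import org.apache.iceberg.spark.actions.SparkActions;
import org.apache.spark.sql.SparkSession;

public class RewriteDataFilesExample {
  public static void main(String[] args) throws Exception {
    SparkSession spark = SparkSession.builder().appName("rewrite-data-files").getOrCreate();
    Table table = Spark3Util.loadIcebergTable(spark, "demo.db.events"); // placeholder identifier

    RewriteDataFiles.Result result =
        SparkActions.get(spark)
            .rewriteDataFiles(table)
            .option("target-file-size-bytes", String.valueOf(512L * 1024 * 1024)) // TARGET_FILE_SIZE_BYTES
            .option("min-input-files", "5")                                        // MIN_INPUT_FILES
            .option("delete-file-threshold", "2")                                  // SizeBasedDataRewriter.DELETE_FILE_THRESHOLD
            .option("rewrite-all", "false")                                        // REWRITE_ALL
            .execute();

    System.out.println("Rewritten data files: " + result.rewrittenDataFilesCount());
  }
}
```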
  • @@ -158,7 +170,7 @@

    Field Summary

    Method Summary

    - + @@ -227,30 +239,29 @@

    Method Summary

    + + + + - - - + + - + - + - + - + - + - + - + - + - + - + + + + + - + - + - + - + - + + + + + - + - + - - - - - + - + - + - + - + + + + + - + + + + + + + + + @@ -186,7 +204,7 @@

    Methods inherited from interface org.apache.iceberg.ScanTaskGroup

-estimatedRowsCount, filesCount, groupingKey, sizeBytes
+groupingKey

diff --git a/javadoc/org/apache/iceberg/BaseFileScanTask.html b/javadoc/org/apache/iceberg/BaseFileScanTask.html index efa0a5352..4a6ef15e2 100644 --- a/javadoc/org/apache/iceberg/BaseFileScanTask.html +++ b/javadoc/org/apache/iceberg/BaseFileScanTask.html @@ -164,52 +164,76 @@

    Method Summary

    + + + + - + + + + + - + - + - + + + + + - + + + + + - + - + - + @@ -226,7 +250,7 @@

    Methods inherited from class java.lang.Object

    Methods inherited from interface org.apache.iceberg.FileScanTask

    -asFileScanTask, filesCount, isFileScanTask, sizeBytes +asFileScanTask, isFileScanTask + + + + + + + + + + + + @@ -414,6 +493,25 @@

    residual

    + + + + diff --git a/javadoc/org/apache/iceberg/BaseMetadataTable.html b/javadoc/org/apache/iceberg/BaseMetadataTable.html index 7f6e32265..ca1fd57c7 100644 --- a/javadoc/org/apache/iceberg/BaseMetadataTable.html +++ b/javadoc/org/apache/iceberg/BaseMetadataTable.html @@ -147,16 +147,6 @@

    Constructor Summary

    - - - - @@ -279,7 +269,7 @@

    Method Summary

    @@ -432,19 +422,6 @@

    Methods inherited from interface org.apache.iceberg. - - - @@ -480,7 +457,7 @@

    table

    operations

    @Deprecated
     public TableOperations operations()
    -
    Deprecated. will be removed in 2.0.0; do not use metadata table TableOperations
    +
    Deprecated. will be removed in 1.4.0; do not use metadata table TableOperations
    Specified by:
    operations in interface HasTableOperations
    diff --git a/javadoc/org/apache/iceberg/BaseMetastoreCatalog.html b/javadoc/org/apache/iceberg/BaseMetastoreCatalog.html index 511ec5074..c3d7c81dd 100644 --- a/javadoc/org/apache/iceberg/BaseMetastoreCatalog.html +++ b/javadoc/org/apache/iceberg/BaseMetastoreCatalog.html @@ -111,7 +111,7 @@

    Class BaseMetastoreCatalog<

    Direct Known Subclasses:
    -
    DynamoDbCatalog, EcsCatalog, GlueCatalog, HadoopCatalog, HiveCatalog, JdbcCatalog, NessieCatalog, SnowflakeCatalog
    +
    DynamoDbCatalog, EcsCatalog, GlueCatalog, HadoopCatalog, HiveCatalog, InMemoryCatalog, JdbcCatalog, NessieCatalog, SnowflakeCatalog


    diff --git a/javadoc/org/apache/iceberg/BaseMetastoreTableOperations.html b/javadoc/org/apache/iceberg/BaseMetastoreTableOperations.html index daf3f1025..7a36ea17c 100644 --- a/javadoc/org/apache/iceberg/BaseMetastoreTableOperations.html +++ b/javadoc/org/apache/iceberg/BaseMetastoreTableOperations.html @@ -328,7 +328,7 @@

    Methods inherited from class java.lang.Object

    Methods inherited from interface org.apache.iceberg.TableOperations

-encryption, io, newSnapshotId
+encryption, io, newSnapshotId, requireStrictCleanup

diff --git a/javadoc/org/apache/iceberg/BaseOverwriteFiles.html b/javadoc/org/apache/iceberg/BaseOverwriteFiles.html index 38bc3343f..85b741976 100644 --- a/javadoc/org/apache/iceberg/BaseOverwriteFiles.html +++ b/javadoc/org/apache/iceberg/BaseOverwriteFiles.html @@ -147,7 +147,7 @@

    Constructor Summary

    Method Summary

    All Methods Instance Methods Concrete Methods Deprecated Methods All Methods Instance Methods Concrete Methods 
    Modifier and Type Method and Description
protected FileIO  io()
    boolean isCaseSensitive()
    Returns whether this scan is case-sensitive with respect to column names.
    protected ThisTnewRefinedScan(TableOperations ignored, - Table newTable, - Schema newSchema, - org.apache.iceberg.TableScanContext newContext) -
    Deprecated.  -
will be removed in 1.3.0; use newRefinedScan(Table, Schema, TableScanContext) instead.
    -
    +
ThisT  metricsReporter(MetricsReporter reporter)
Create a new scan that will report scan metrics to the provided reporter in addition to reporters maintained by the scan.
    protected TableScan newRefinedScan(Table table, Schema schema, org.apache.iceberg.TableScanContext context) 
    ThisT option(java.lang.String property, java.lang.String value) @@ -258,115 +269,115 @@

    Method Summary

    behavior based on the incoming pair.
    protected java.util.Map<java.lang.String,java.lang.String> options() 
    protected java.util.concurrent.ExecutorService planExecutor() 
    CloseableIterable<FileScanTask> planFiles()
    Plan tasks for this scan where each task reads a single file.
    CloseableIterable<CombinedScanTask> planTasks()
    Plan balanced task groups for this scan by splitting large and combining small tasks.
    ThisT planWith(java.util.concurrent.ExecutorService executorService)
    Create a new scan to use a particular executor to plan.
    ThisT project(Schema projectedSchema)
    Create a new scan from this with the schema as its projection.
    protected CloseableIterable<ManifestFile> reachableManifests(org.apache.iceberg.relocated.com.google.common.base.Function<Snapshot,java.lang.Iterable<ManifestFile>> toManifests) 
protected Expression  residualFilter()
    protected java.util.List<java.lang.String> scanColumns() 
    Schema schema()
    Returns this scan's projection Schema.
    ThisT select(java.util.Collection<java.lang.String> columns)
    Create a new scan from this that will read the given data columns.
    protected boolean shouldIgnoreResiduals() 
    protected boolean shouldPlanWithExecutor() 
protected boolean  shouldReturnColumnStats()
    int splitLookback()
    Returns the split lookback for this scan.
    long splitOpenFileCost()
    Returns the split open file cost for this scan.
    Table table() 
    protected TableOperationstableOps() -
    Deprecated.  -
    will be removed in 1.3.0; avoid using TableOperations for scans or use BaseTable
    -
    -
    protected Schema tableSchema() 
    protected MetadataTableType tableType()
    Type of scan being performed, such as MetadataTableType.ALL_DATA_FILES when scanning a table's AllDataFilesTable.
    long targetSplitSize()
    Returns the target split size for this scan.
    TableScan useRef(java.lang.String ref)
    Create a new TableScan from this scan's configuration that will use the given reference.
    TableScan useSnapshot(long scanSnapshotId)
    Create a new TableScan from this scan's configuration that will use the given snapshot @@ -379,7 +390,7 @@

    Method Summary

    Methods inherited from class org.apache.iceberg.SnapshotScan

    -scanMetrics, snapshot, snapshotId, toString +scanMetrics, snapshot, snapshotId, toString, useSnapshotSchema @@ -416,6 +427,24 @@

    Methods inherited from interface org.apache.iceberg. + + +
      +
    • +

      SCAN_COLUMNS

      +
      protected static final java.util.List<java.lang.String> SCAN_COLUMNS
      +
    • +
    + + + +
      +
    • +

      SCAN_WITH_STATS_COLUMNS

      +
      protected static final java.util.List<java.lang.String> SCAN_WITH_STATS_COLUMNS
      +
    • +
    @@ -428,12 +457,21 @@

    DELETE_SCAN_COLUMNS

    -
      +
      • DELETE_SCAN_WITH_STATS_COLUMNS

        protected static final java.util.List<java.lang.String> DELETE_SCAN_WITH_STATS_COLUMNS
      + + + +
        +
      • +

        PLAN_SCANS_WITH_WORKER_POOL

        +
        protected static final boolean PLAN_SCANS_WITH_WORKER_POOL
        +
      • +
    @@ -656,24 +694,22 @@

    planTasks

    - +
    • -

      tableOps

      -
      @Deprecated
      -protected TableOperations tableOps()
      -
      Deprecated. will be removed in 1.3.0; avoid using TableOperations for scans or use BaseTable
      +

      table

      +
      public Table table()
    - +
    • -

      table

      -
      public Table table()
      +

      io

      +
      protected FileIO io()
    @@ -712,6 +748,15 @@

    scanColumns

    protected java.util.List<java.lang.String> scanColumns()
    +
    + + +
      +
    • +

      shouldReturnColumnStats

      +
      protected boolean shouldReturnColumnStats()
      +
    • +
    @@ -721,37 +766,31 @@

    shouldIgnoreResiduals

    protected boolean shouldIgnoreResiduals()
    - +
    • -

      shouldPlanWithExecutor

      -
      protected boolean shouldPlanWithExecutor()
      +

      residualFilter

      +
      protected Expression residualFilter()
    - +
    • -

      planExecutor

      -
      protected java.util.concurrent.ExecutorService planExecutor()
      +

      shouldPlanWithExecutor

      +
      protected boolean shouldPlanWithExecutor()
    - +
    • -

      newRefinedScan

      -
      @Deprecated
      -protected ThisT newRefinedScan(TableOperations ignored,
      -                                           Table newTable,
      -                                           Schema newSchema,
      -                                           org.apache.iceberg.TableScanContext newContext)
      -
      Deprecated. will be removed in 1.3.0; use newRefinedScan(Table, Schema, TableScanContext) - instead.
      +

      planExecutor

      +
      protected java.util.concurrent.ExecutorService planExecutor()
    @@ -986,7 +1025,7 @@

    splitLookback

    -
      +
      • splitOpenFileCost

        public long splitOpenFileCost()
        @@ -998,6 +1037,22 @@

        splitOpenFileCost

      + + + +
        +
      • +

        metricsReporter

        +
        public ThisT metricsReporter(MetricsReporter reporter)
        +
        Description copied from interface: Scan
        +
        Create a new scan that will report scan metrics to the provided reporter in addition to + reporters maintained by the scan.
        +
        +
        Specified by:
        +
        metricsReporter in interface Scan<ThisT,T extends ScanTask,G extends ScanTaskGroup<T>>
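A rough usage sketch of the metricsReporter hook described above; the reporter implementation and the Table variable are assumptions, not part of this page:

```java
import java.io.IOException;

import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.metrics.MetricsReport;
import org.apache.iceberg.metrics.MetricsReporter;

public class ScanMetricsExample {
  // A trivial reporter that only prints each report; a real reporter would ship it elsewhere.
  static class StdoutReporter implements MetricsReporter {
    @Override
    public void report(MetricsReport report) {
      System.out.println("scan report: " + report);
    }
  }

  static void scanWithMetrics(Table table) throws IOException {
    try (CloseableIterable<FileScanTask> tasks =
        table.newScan().metricsReporter(new StdoutReporter()).planFiles()) {
      tasks.forEach(task -> System.out.println(task.file().path()));
    }
  }
}
```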
        +
        +
      • +
    diff --git a/javadoc/org/apache/iceberg/BaseCombinedScanTask.html b/javadoc/org/apache/iceberg/BaseCombinedScanTask.html index 7311775d0..a43b84e16 100644 --- a/javadoc/org/apache/iceberg/BaseCombinedScanTask.html +++ b/javadoc/org/apache/iceberg/BaseCombinedScanTask.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -157,12 +157,30 @@

    Method Summary

    Method and Description
long estimatedRowsCount() +
    The estimated number of rows produced by this scan task.
    +
    java.util.Collection<FileScanTask> files()
    Return the tasks in this combined task.
int filesCount() +
    The number of files that will be opened by this scan task.
    +
long sizeBytes() +
    The number of bytes that should be read by this scan task.
    +
    java.lang.String toString() 
long estimatedRowsCount() +
    The estimated number of rows produced by this scan task.
    +
    F file()
    The file to scan.
int filesCount() +
    The number of files that will be opened by this scan task.
    +
    long length()
    The number of bytes to scan from the ContentScanTask.start() position in the file.
    protected FileScanTask newSplitTask(FileScanTask parentTask, long offset, long length) 
    Expression residual()
    Returns the residual expression that should be applied to rows in this file scan.
Schema schema() +
    Return the schema for this file scan task.
    +
    protected FileScanTask self() 
long sizeBytes() +
    The number of bytes that should be read by this scan task.
    +
    PartitionSpec spec()
    Returns the spec of the partition for this scan task
    java.lang.Iterable<ThisT> split(long targetSplitSize)
    Attempts to split this scan task into several smaller scan tasks, each close to splitSize size.
    long start()
    The starting position of this scan range in the file.
    java.lang.String toString() 
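A short, hedged sketch of how the per-task statistics listed above (sizeBytes, estimatedRowsCount, filesCount) might be read while iterating planned tasks; the Table variable is an assumed input:

```java
import java.io.IOException;

import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.io.CloseableIterable;

public class TaskStatsExample {
  static void describeTasks(Table table) throws IOException {
    try (CloseableIterable<CombinedScanTask> tasks = table.newScan().planTasks()) {
      for (CombinedScanTask combined : tasks) {
        // The ScanTask statistics surfaced above: bytes to read, row estimate, and file count.
        System.out.printf("group: %d bytes, ~%d rows, %d files%n",
            combined.sizeBytes(), combined.estimatedRowsCount(), combined.filesCount());
        for (FileScanTask task : combined.files()) {
          System.out.printf("  %s [start=%d, length=%d]%n",
              task.file().path(), task.start(), task.length());
        }
      }
    }
  }
}
```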
    protected BaseMetadataTable(TableOperations ignored, - Table table, - java.lang.String name) -
    Deprecated.  -
    will be removed in 1.3.0; use BaseMetadataTable(Table, String) instead.
    -
    -
    protected BaseMetadataTable(Table table, java.lang.String name) 
    TableOperations operations()
    Deprecated.  -
    will be removed in 2.0.0; do not use metadata table TableOperations
    +
    will be removed in 1.4.0; do not use metadata table TableOperations
    - + @@ -166,23 +166,22 @@

    Method Summary

    - - - + + + + + - - - - + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - + - - - - - + - + - - - - - + - - - - - + - - - - - + - + - + - + - - - - - - - - - - - - - + - - - - - + - + @@ -1000,13 +923,13 @@

    rowFilter

    protected Expression rowFilter()
    - + @@ -1083,6 +1006,42 @@

    delete

    Add a specific data path to be deleted in the new snapshot.
    +
    + + + + + + + + + + + + + + + @@ -1103,34 +1062,25 @@

    add

    Add a delete file to the new snapshot.
    - + - + @@ -1154,26 +1104,6 @@

    validateAddedDataFiles

    -
    - - - @@ -1215,50 +1145,6 @@

    validateNoNewDeletesForDataFiles

    - - - - - - - - @@ -1282,27 +1168,6 @@

    validateNoNewDeletesForDataFiles

    - - - - @@ -1324,28 +1189,6 @@

    validateNoNewDeleteFiles

    - - - - @@ -1367,29 +1210,6 @@

    validateNoNewDeleteFiles

    - - - - @@ -1412,28 +1232,6 @@

    addedDeleteFiles

    - - - - @@ -1455,28 +1253,6 @@

    validateDeletedDataFiles

    - - - - @@ -1498,28 +1274,13 @@

    validateDeletedDataFiles

    - - - - - + @@ -1658,7 +1419,7 @@

    reportWith

  • targetBranch

    protected void targetBranch(java.lang.String branch)
    -
    * A setter for the target branch on which snapshot producer operation should be performed
    +
    A setter for the target branch on which snapshot producer operation should be performed
    Parameters:
    branch - to set as target branch
    @@ -1812,6 +1573,24 @@

    newDeleteManifestWriter

    protected ManifestWriter<DeleteFile> newDeleteManifestWriter(PartitionSpec spec)
  • + + + + + + + + diff --git a/javadoc/org/apache/iceberg/BaseReplacePartitions.html b/javadoc/org/apache/iceberg/BaseReplacePartitions.html index 5d160d73b..5fea25ba2 100644 --- a/javadoc/org/apache/iceberg/BaseReplacePartitions.html +++ b/javadoc/org/apache/iceberg/BaseReplacePartitions.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":42,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":42,"i49":10,"i50":42,"i51":10,"i52":10,"i53":42,"i54":10,"i55":42,"i56":10,"i57":42,"i58":10,"i59":10,"i60":10,"i61":10,"i62":42,"i63":10,"i64":42,"i65":10,"i66":42,"i67":10,"i68":42,"i69":10,"i70":10}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -127,7 +127,7 @@

    Class BaseReplacePartition

    Method Summary

    All Methods Instance Methods Concrete Methods Deprecated Methods All Methods Instance Methods Concrete Methods 
    Modifier and Type Method and Description
protected void add(ManifestFile manifest) -
    Add all files in a manifest to the new snapshot.
    +
    add(DeleteFile file, + long dataSequenceNumber) +
    Add a delete file to the new snapshot.
protected org.apache.iceberg.DeleteFileIndex addedDeleteFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression dataFilter, - PartitionSet partitionSet) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.addedDeleteFiles(TableMetadata, Long, Expression, PartitionSet, - Snapshot) instead
    -
    +
protected void add(ManifestFile manifest) +
    Add all files in a manifest to the new snapshot.
protected java.util.List<DataFile> addedDataFiles() 
    protected org.apache.iceberg.DeleteFileIndex addedDeleteFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -192,10 +191,6 @@

    Method Summary

Returns matching delete files that have been added to the table since a starting snapshot.
protected java.util.List<DataFile> addedFiles() 
    OverwriteFiles addFile(DataFile file) @@ -203,239 +198,252 @@

    Method Summary

protected boolean addsDataFiles() 
protected boolean addsDeleteFiles() 
    Snapshot apply()
    Apply the pending changes and return the uncommitted changes for validation.
    java.util.List<ManifestFile> apply(TableMetadata base, Snapshot snapshot)
    Apply the update's changes to the given metadata and snapshot.
    ThisT caseSensitive(boolean isCaseSensitive) 
    protected void cleanAll() 
    protected void cleanUncommitted(java.util.Set<ManifestFile> committed)
    Clean up any uncommitted manifests that were created.
    void commit()
    Apply the pending changes and commit.
    protected CommitMetrics commitMetrics() 
    OverwriteFiles conflictDetectionFilter(Expression newConflictDetectionFilter)
    Sets a conflict detection filter used to validate concurrently added data and delete files.
    protected TableMetadata current() 
    protected PartitionSpec dataSpec() 
    protected void delete(java.lang.CharSequence path)
    Add a specific data path to be deleted in the new snapshot.
    protected void delete(DataFile file)
    Add a specific data file to be deleted in the new snapshot.
    protected void delete(DeleteFile file)
    Add a specific delete file to be deleted in the new snapshot.
    protected void deleteByRowFilter(Expression expr)
    Add a filter to match files to delete.
    OverwriteFiles deleteFile(DataFile file)
    Delete a DataFile from the table.
    protected void deleteFile(java.lang.String path) 
protected boolean deletesDataFiles() 
protected boolean deletesDeleteFiles() 
    ThisT deleteWith(java.util.function.Consumer<java.lang.String> deleteCallback)
    Set a callback to delete files instead of the table's default.
    protected void dropPartition(int specId, StructLike partition)
    Add a partition tuple to drop from the table during the delete phase.
    protected void failAnyDelete() 
    protected void failMissingDeletePaths() 
    protected boolean isCaseSensitive() 
    protected OutputFile manifestListPath() 
    protected ManifestReader<DeleteFile> newDeleteManifestReader(ManifestFile manifest) 
    protected ManifestWriter<DeleteFile> newDeleteManifestWriter(PartitionSpec spec) 
    protected OutputFile newManifestOutput() 
    protected ManifestReader<DataFile> newManifestReader(ManifestFile manifest) 
    protected ManifestWriter<DataFile> newManifestWriter(PartitionSpec spec) 
protected RollingManifestWriter<DeleteFile> newRollingDeleteManifestWriter(PartitionSpec spec) 
protected RollingManifestWriter<DataFile> newRollingManifestWriter(PartitionSpec spec) 
    protected java.lang.String operation()
    A string that describes the action that produced the new snapshot.
    OverwriteFiles overwriteByRowFilter(Expression expr)
    Delete files that match an Expression on data rows from the table.
    protected TableMetadata refresh() 
    protected ThisT reportWith(MetricsReporter newReporter) 
    protected Expression rowFilter() 
    ThisT scanManifestsWith(java.util.concurrent.ExecutorService executorService)
    Use a particular executor to scan manifests.
    protected OverwriteFiles self() 
    ThisT set(java.lang.String property, java.lang.String value)
    Set a summary property in the snapshot produced by this update.
protected void setNewFilesSequenceNumber(long sequenceNumber) setNewDataFilesDataSequenceNumber(long sequenceNumber) 
    protected long snapshotId() 
    ThisT stageOnly()
    Called to stage a snapshot in table metadata, but not update the current snapshot id.
    protected java.util.Map<java.lang.String,java.lang.String> summary() 
    protected java.lang.String targetBranch() 
    protected void targetBranch(java.lang.String branch) -
    * A setter for the target branch on which snapshot producer operation should be performed
    +
    A setter for the target branch on which snapshot producer operation should be performed
    BaseOverwriteFiles toBranch(java.lang.String branch)
    Perform operations on a particular branch
    java.lang.Object updateEvent()
    Generates update event to notify about metadata changes
    protected void validate(TableMetadata base, Snapshot parent)
    Validate the current metadata.
protected void validateAddedDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression conflictDetectionFilter) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateAddedDataFiles(TableMetadata, Long, Expression, Snapshot) - instead
    -
    -
    protected void validateAddedDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -445,18 +453,7 @@

    Method Summary

    snapshot.
protected void validateAddedDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - PartitionSet partitionSet) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateAddedDataFiles(TableMetadata, Long, PartitionSet, - Snapshot) instead
    -
    -
    protected void validateAddedDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -466,23 +463,13 @@

    Method Summary

    snapshot.
    OverwriteFiles validateAddedFilesMatchOverwriteFilter()
    Signal that each file added to the table must match the overwrite expression.
protected void validateDataFilesExist(TableMetadata base, - java.lang.Long startingSnapshotId, - CharSequenceSet requiredDataFiles, - boolean skipDeletes, - Expression conflictDetectionFilter) -
    Deprecated. 
    -
    protected void validateDataFilesExist(TableMetadata base, java.lang.Long startingSnapshotId, @@ -491,18 +478,7 @@

    Method Summary

    Expression conflictDetectionFilter, Snapshot parent)
     
protected void validateDeletedDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression dataFilter) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateDeletedDataFiles(TableMetadata, Long, Expression, - Snapshot) instead
    -
    -
    protected void validateDeletedDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -512,18 +488,7 @@

    Method Summary

    snapshot.
protected void validateDeletedDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - PartitionSet partitionSet) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeleteFiles(TableMetadata, Long, PartitionSet, - Snapshot) instead
    -
    -
    protected void validateDeletedDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -533,35 +498,25 @@

    Method Summary

    snapshot.
    OverwriteFiles validateFromSnapshot(long snapshotId)
    Set the snapshot ID used in any reads for this operation.
    OverwriteFiles validateNoConflictingData()
    Enables validation that data added concurrently does not conflict with this commit's operation.
    OverwriteFiles validateNoConflictingDeletes()
    Enables validation that deletes that happened concurrently do not conflict with this commit's operation.
protected void validateNoNewDeleteFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression dataFilter) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeleteFiles(org.apache.iceberg.TableMetadata, java.lang.Long, org.apache.iceberg.expressions.Expression) instead
    -
    -
    protected void validateNoNewDeleteFiles(TableMetadata base, @@ -574,17 +529,6 @@

    Method Summary

protected void validateNoNewDeleteFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - PartitionSet partitionSet) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeleteFiles(TableMetadata, Long, PartitionSet, - Snapshot) instead
    -
    -
    protected void validateNoNewDeleteFiles(TableMetadata base, java.lang.Long startingSnapshotId, PartitionSet partitionSet, @@ -593,18 +537,7 @@

    Method Summary

    starting snapshot.
protected void validateNoNewDeletesForDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression dataFilter, - java.lang.Iterable<DataFile> dataFiles) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeletesForDataFiles(org.apache.iceberg.TableMetadata, java.lang.Long, java.lang.Iterable<org.apache.iceberg.DataFile>, org.apache.iceberg.Snapshot) instead
    -
    -
    protected void validateNoNewDeletesForDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -615,17 +548,7 @@

    Method Summary

    to the table since a starting snapshot.
protected void validateNoNewDeletesForDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - java.lang.Iterable<DataFile> dataFiles) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeletesForDataFiles(org.apache.iceberg.TableMetadata, java.lang.Long, java.lang.Iterable<org.apache.iceberg.DataFile>, org.apache.iceberg.Snapshot) instead
    -
    -
    protected void validateNoNewDeletesForDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -635,7 +558,7 @@

    Method Summary

    to the table since a starting snapshot.
    protected java.util.concurrent.ExecutorService workerPool() 
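The validation methods in this summary are designed to be chained on a single overwrite before commit. A hedged sketch of one plausible ordering (the Table, the files to add, the starting snapshot id, and the day partition column are assumptions for illustration):

```java
import org.apache.iceberg.DataFile;
import org.apache.iceberg.OverwriteFiles;
import org.apache.iceberg.Table;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;

public class OverwriteExample {
  static void overwritePartition(Table table, Iterable<DataFile> filesToAdd, long startingSnapshotId) {
    // Assumption: the table is partitioned by a "day" column.
    Expression partitionFilter = Expressions.equal("day", "2023-01-01");

    OverwriteFiles overwrite = table.newOverwrite()
        .overwriteByRowFilter(partitionFilter)
        .conflictDetectionFilter(partitionFilter)
        .validateFromSnapshot(startingSnapshotId)
        .validateNoConflictingData()
        .validateNoConflictingDeletes()
        .validateAddedFilesMatchOverwriteFilter();

    for (DataFile file : filesToAdd) {
      overwrite.addFile(file);
    }

    // Fails at commit time if concurrent writers violated the validations above.
    overwrite.commit();
  }
}
```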
    - + @@ -146,23 +146,22 @@

    Method Summary

    - - - + + + + + - - - - + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - + - - - - - + - + - - - - - + - - - - - + - - - - - + - + - + - + - - - - - - - - - - - - - + - - - - - + - + @@ -876,13 +799,13 @@

    rowFilter

    protected Expression rowFilter()
    - + @@ -959,6 +882,42 @@

    delete

    Add a specific data path to be deleted in the new snapshot.
    +
    + + + + + + + + + + + + + + + @@ -979,34 +938,25 @@

    add

    Add a delete file to the new snapshot.
    - + - + @@ -1030,26 +980,6 @@

    validateAddedDataFiles

    -
    - - - @@ -1091,50 +1021,6 @@

    validateNoNewDeletesForDataFiles

    - - - - - - - - @@ -1158,27 +1044,6 @@

    validateNoNewDeletesForDataFiles

    - - - - @@ -1200,28 +1065,6 @@

    validateNoNewDeleteFiles

    - - - - @@ -1243,29 +1086,6 @@

    validateNoNewDeleteFiles

    - - - - @@ -1288,28 +1108,6 @@

    addedDeleteFiles

    - - - - @@ -1331,28 +1129,6 @@

    validateDeletedDataFiles

    - - - - @@ -1374,28 +1150,13 @@

    validateDeletedDataFiles

    - + - - - - @@ -1516,7 +1277,7 @@

    reportWith

  • targetBranch

    protected void targetBranch(java.lang.String branch)
    -
    * A setter for the target branch on which snapshot producer operation should be performed
    +
    A setter for the target branch on which snapshot producer operation should be performed
    Parameters:
    branch - to set as target branch
    @@ -1670,6 +1431,24 @@

    newDeleteManifestWriter

    protected ManifestWriter<DeleteFile> newDeleteManifestWriter(PartitionSpec spec)
  • + + + + + + + + diff --git a/javadoc/org/apache/iceberg/BaseReplaceSortOrder.html b/javadoc/org/apache/iceberg/BaseReplaceSortOrder.html index 088cb40e8..f6bb3f7ae 100644 --- a/javadoc/org/apache/iceberg/BaseReplaceSortOrder.html +++ b/javadoc/org/apache/iceberg/BaseReplaceSortOrder.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -146,12 +146,18 @@

    Method Summary

    + + + + - + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -575,7 +583,7 @@

    reportWith

  • targetBranch

    protected void targetBranch(java.lang.String branch)
    -
    * A setter for the target branch on which snapshot producer operation should be performed
    +
    A setter for the target branch on which snapshot producer operation should be performed
    Parameters:
    branch - to set as target branch
    @@ -747,6 +755,24 @@

    newDeleteManifestWriter

    protected ManifestWriter<DeleteFile> newDeleteManifestWriter(PartitionSpec spec)
  • + + + + + + + + diff --git a/javadoc/org/apache/iceberg/BaseScanTaskGroup.html b/javadoc/org/apache/iceberg/BaseScanTaskGroup.html index 9c79948ec..0c595fdec 100644 --- a/javadoc/org/apache/iceberg/BaseScanTaskGroup.html +++ b/javadoc/org/apache/iceberg/BaseScanTaskGroup.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -158,18 +158,36 @@

    Method Summary

    + + + + + + + + - + + + + + - + @@ -182,13 +200,6 @@

    Methods inherited from class java.lang.Object

    clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait - + + + + + + + + + + + + diff --git a/javadoc/org/apache/iceberg/BaseTransaction.TransactionTableOperations.html b/javadoc/org/apache/iceberg/BaseTransaction.TransactionTableOperations.html index d905d5d69..7f26b0cc5 100644 --- a/javadoc/org/apache/iceberg/BaseTransaction.TransactionTableOperations.html +++ b/javadoc/org/apache/iceberg/BaseTransaction.TransactionTableOperations.html @@ -217,7 +217,7 @@

    Methods inherited from class java.lang.Object

    Methods inherited from interface org.apache.iceberg.TableOperations

    -temp +requireStrictCleanup, temp diff --git a/javadoc/org/apache/iceberg/BaseTransaction.html b/javadoc/org/apache/iceberg/BaseTransaction.html index 161a28577..99a706794 100644 --- a/javadoc/org/apache/iceberg/BaseTransaction.html +++ b/javadoc/org/apache/iceberg/BaseTransaction.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -162,117 +162,121 @@

    Method Summary

    + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - - + + @@ -189,125 +181,132 @@

    Field Summary

    - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -449,6 +448,33 @@

    CACHE_ENABLED_DEFAULT

    + + + + + + + + @@ -680,6 +706,23 @@

    CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS_DEFAULT

    public static final long CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS_DEFAULT
    + + + + @@ -846,38 +889,6 @@

    USER

    - - - - - - - - diff --git a/javadoc/org/apache/iceberg/CatalogUtil.html b/javadoc/org/apache/iceberg/CatalogUtil.html index c28a05ef0..d477489b1 100644 --- a/javadoc/org/apache/iceberg/CatalogUtil.html +++ b/javadoc/org/apache/iceberg/CatalogUtil.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9}; +var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -191,12 +191,21 @@

    Method Summary

    + + + + - + - + - + + + + + - + - + - + - + - + @@ -339,12 +343,21 @@

    close

    - diff --git a/javadoc/org/apache/iceberg/ContentFile.html b/javadoc/org/apache/iceberg/ContentFile.html index 12f6a8d62..b287f2312 100644 --- a/javadoc/org/apache/iceberg/ContentFile.html +++ b/javadoc/org/apache/iceberg/ContentFile.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":6,"i3":18,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":18,"i17":6,"i18":6,"i19":6,"i20":6}; +var methods = {"i0":6,"i1":6,"i2":6,"i3":18,"i4":6,"i5":18,"i6":6,"i7":18,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":18,"i19":6,"i20":6,"i21":6,"i22":6}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -162,100 +162,113 @@

    Method Summary

    + + + + - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - +
    All Methods Instance Methods Concrete Methods Deprecated Methods All Methods Instance Methods Concrete Methods 
    Modifier and Type Method and Description
    protected voidadd(ManifestFile manifest) -
    Add all files in a manifest to the new snapshot.
    +
    add(DeleteFile file, + long dataSequenceNumber) +
    Add a delete file to the new snapshot.
    protected org.apache.iceberg.DeleteFileIndexaddedDeleteFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression dataFilter, - PartitionSet partitionSet) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.addedDeleteFiles(TableMetadata, Long, Expression, PartitionSet, - Snapshot) instead
    -
    +
    protected voidadd(ManifestFile manifest) +
    Add all files in a manifest to the new snapshot.
    protected java.util.List<DataFile>addedDataFiles() 
    protected org.apache.iceberg.DeleteFileIndex addedDeleteFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -172,10 +171,6 @@

    Method Summary

    Returns matching delete files have been added to the table since a starting snapshot.
    protected java.util.List<DataFile>addedFiles() 
    ReplacePartitions addFile(DataFile file) @@ -183,221 +178,234 @@

    Method Summary

    protected booleanaddsDataFiles() 
    protected booleanaddsDeleteFiles() 
    Snapshot apply()
    Apply the pending changes and return the uncommitted changes for validation.
    java.util.List<ManifestFile> apply(TableMetadata base, Snapshot snapshot)
    Apply the update's changes to the given metadata and snapshot.
    ThisT caseSensitive(boolean isCaseSensitive) 
    protected void cleanAll() 
    protected void cleanUncommitted(java.util.Set<ManifestFile> committed)
    Clean up any uncommitted manifests that were created.
    void commit()
    Apply the pending changes and commit.
    protected CommitMetrics commitMetrics() 
    protected TableMetadata current() 
    protected PartitionSpec dataSpec() 
    protected void delete(java.lang.CharSequence path)
    Add a specific data path to be deleted in the new snapshot.
    protected void delete(DataFile file)
    Add a specific data file to be deleted in the new snapshot.
    protected void delete(DeleteFile file)
    Add a specific delete file to be deleted in the new snapshot.
    protected void deleteByRowFilter(Expression expr)
    Add a filter to match files to delete.
    protected void deleteFile(java.lang.String path) 
    protected booleandeletesDataFiles() 
    protected booleandeletesDeleteFiles() 
    ThisT deleteWith(java.util.function.Consumer<java.lang.String> deleteCallback)
    Set a callback to delete files instead of the table's default.
    protected void dropPartition(int specId, StructLike partition)
    Add a partition tuple to drop from the table during the delete phase.
    protected void failAnyDelete() 
    protected void failMissingDeletePaths() 
    protected boolean isCaseSensitive() 
    protected OutputFile manifestListPath() 
    protected ManifestReader<DeleteFile> newDeleteManifestReader(ManifestFile manifest) 
    protected ManifestWriter<DeleteFile> newDeleteManifestWriter(PartitionSpec spec) 
    protected OutputFile newManifestOutput() 
    protected ManifestReader<DataFile> newManifestReader(ManifestFile manifest) 
    protected ManifestWriter<DataFile> newManifestWriter(PartitionSpec spec) 
    protected RollingManifestWriter<DeleteFile>newRollingDeleteManifestWriter(PartitionSpec spec) 
    protected RollingManifestWriter<DataFile>newRollingManifestWriter(PartitionSpec spec) 
    protected java.lang.String operation()
    A string that describes the action that produced the new snapshot.
    protected TableMetadata refresh() 
    protected ThisT reportWith(MetricsReporter newReporter) 
    protected Expression rowFilter() 
    ThisT scanManifestsWith(java.util.concurrent.ExecutorService executorService)
    Use a particular executor to scan manifests.
    protected ReplacePartitions self() 
    ThisT set(java.lang.String property, java.lang.String value)
    Set a summary property in the snapshot produced by this update.
    protected voidsetNewFilesSequenceNumber(long sequenceNumber) setNewDataFilesDataSequenceNumber(long sequenceNumber) 
    protected long snapshotId() 
    ThisT stageOnly()
    Called to stage a snapshot in table metadata, but not update the current snapshot id.
    protected java.util.Map<java.lang.String,java.lang.String> summary() 
    protected java.lang.String targetBranch() 
    protected void targetBranch(java.lang.String branch) -
    * A setter for the target branch on which snapshot producer operation should be performed
    +
    A setter for the target branch on which snapshot producer operation should be performed
    BaseReplacePartitions toBranch(java.lang.String branch)
    Perform operations on a particular branch
    java.lang.Object updateEvent()
    Generates update event to notify about metadata changes
    void validate(TableMetadata currentMetadata, Snapshot parent)
    Validate the current metadata.
    protected voidvalidateAddedDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression conflictDetectionFilter) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateAddedDataFiles(TableMetadata, Long, Expression, Snapshot) - instead
    -
    -
    protected void validateAddedDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -407,18 +415,7 @@

    Method Summary

    snapshot.
    protected voidvalidateAddedDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - PartitionSet partitionSet) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateAddedDataFiles(TableMetadata, Long, PartitionSet, - Snapshot) instead
    -
    -
    protected void validateAddedDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -428,23 +425,13 @@

    Method Summary

    snapshot.
    ReplacePartitions validateAppendOnly()
    Validate that no partitions will be replaced and the operation is append-only.
    protected voidvalidateDataFilesExist(TableMetadata base, - java.lang.Long startingSnapshotId, - CharSequenceSet requiredDataFiles, - boolean skipDeletes, - Expression conflictDetectionFilter) -
    Deprecated. 
    -
    protected void validateDataFilesExist(TableMetadata base, java.lang.Long startingSnapshotId, @@ -453,18 +440,7 @@

    Method Summary

    Expression conflictDetectionFilter, Snapshot parent)
     
    protected voidvalidateDeletedDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression dataFilter) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateDeletedDataFiles(TableMetadata, Long, Expression, - Snapshot) instead
    -
    -
    protected void validateDeletedDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -474,18 +450,7 @@

    Method Summary

    snapshot.
    protected voidvalidateDeletedDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - PartitionSet partitionSet) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeleteFiles(TableMetadata, Long, PartitionSet, - Snapshot) instead
    -
    -
    protected void validateDeletedDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -495,35 +460,25 @@

    Method Summary

    snapshot.
    ReplacePartitions validateFromSnapshot(long newStartingSnapshotId)
    Set the snapshot ID used in validations for this operation.
    ReplacePartitions validateNoConflictingData()
    Enables validation that data added concurrently does not conflict with this commit's operation.
    ReplacePartitions validateNoConflictingDeletes()
    Enables validation that deletes that happened concurrently do not conflict with this commit's operation.
    protected voidvalidateNoNewDeleteFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression dataFilter) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeleteFiles(org.apache.iceberg.TableMetadata, java.lang.Long, org.apache.iceberg.expressions.Expression) instead
    -
    -
    protected void validateNoNewDeleteFiles(TableMetadata base, @@ -536,17 +491,6 @@

    Method Summary

    protected voidvalidateNoNewDeleteFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - PartitionSet partitionSet) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeleteFiles(TableMetadata, Long, PartitionSet, - Snapshot) instead
    -
    -
    protected void validateNoNewDeleteFiles(TableMetadata base, java.lang.Long startingSnapshotId, PartitionSet partitionSet, @@ -555,18 +499,7 @@

    Method Summary

    starting snapshot.
    protected voidvalidateNoNewDeletesForDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - Expression dataFilter, - java.lang.Iterable<DataFile> dataFiles) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeletesForDataFiles(org.apache.iceberg.TableMetadata, java.lang.Long, java.lang.Iterable<org.apache.iceberg.DataFile>, org.apache.iceberg.Snapshot) instead
    -
    -
    protected void validateNoNewDeletesForDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -577,17 +510,7 @@

    Method Summary

    to the table since a starting snapshot.
    protected voidvalidateNoNewDeletesForDataFiles(TableMetadata base, - java.lang.Long startingSnapshotId, - java.lang.Iterable<DataFile> dataFiles) -
    Deprecated.  -
    will be removed in 1.3.0; use MergingSnapshotProducer.validateNoNewDeletesForDataFiles(org.apache.iceberg.TableMetadata, java.lang.Long, java.lang.Iterable<org.apache.iceberg.DataFile>, org.apache.iceberg.Snapshot) instead
    -
    -
    protected void validateNoNewDeletesForDataFiles(TableMetadata base, java.lang.Long startingSnapshotId, @@ -597,7 +520,7 @@

    Method Summary

    to the table since a starting snapshot.
    protected java.util.concurrent.ExecutorService workerPool() 
ReplaceSortOrder caseSensitive(boolean caseSensitive) +
    Set case sensitivity of sort column name resolution.
    +
    void commit()
    Apply the pending changes and commit.
    ReplaceSortOrder desc(Term term, NullOrder nullOrder) @@ -256,7 +262,7 @@

    asc

    - diff --git a/javadoc/org/apache/iceberg/BaseRewriteManifests.html b/javadoc/org/apache/iceberg/BaseRewriteManifests.html index de8721f21..0534feca8 100644 --- a/javadoc/org/apache/iceberg/BaseRewriteManifests.html +++ b/javadoc/org/apache/iceberg/BaseRewriteManifests.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -222,74 +222,82 @@

    Method Summary

    newManifestWriter(PartitionSpec spec) 
    protected RollingManifestWriter<DeleteFile>newRollingDeleteManifestWriter(PartitionSpec spec) 
    protected RollingManifestWriter<DataFile>newRollingManifestWriter(PartitionSpec spec) 
    protected java.lang.String operation()
    A string that describes the action that produced the new snapshot.
    protected TableMetadata refresh() 
    protected ThisT reportWith(MetricsReporter newReporter) 
    RewriteManifests rewriteIf(java.util.function.Predicate<ManifestFile> pred)
    Determines which existing ManifestFile for the table should be rewritten.
    ThisT scanManifestsWith(java.util.concurrent.ExecutorService executorService)
    Use a particular executor to scan manifests.
    protected RewriteManifests self() 
    RewriteManifests set(java.lang.String property, java.lang.String value)
    Set a summary property in the snapshot produced by this update.
    protected long snapshotId() 
    ThisT stageOnly()
    Called to stage a snapshot in table metadata, but not update the current snapshot id.
    protected java.util.Map<java.lang.String,java.lang.String> summary() 
    protected java.lang.String targetBranch() 
    protected void targetBranch(java.lang.String branch) -
    * A setter for the target branch on which snapshot producer operation should be performed
    +
    A setter for the target branch on which snapshot producer operation should be performed
    protected void validate(TableMetadata currentMetadata, Snapshot snapshot)
    Validate the current metadata.
    protected java.util.concurrent.ExecutorService workerPool() 
    Method and Description
long estimatedRowsCount() +
    The estimated number of rows produced by this scan task.
    +
int filesCount() +
    The number of files that will be opened by this scan task.
    +
    StructLike groupingKey()
    Returns a grouping key for this task group.
long sizeBytes() +
    The number of bytes that should be read by this scan task.
    +
    java.util.Collection<T> tasks()
    Returns scan tasks in this group.
    java.lang.String toString() 
TableMetadata currentMetadata() 
    ExpireSnapshots expireSnapshots()
    Create a new expire API to manage snapshots in this table.
    ManageSnapshots manageSnapshots()
    Create a new manage snapshot API to manage snapshots in this table.
    AppendFiles newAppend()
    Create a new append API to add files to this table.
    DeleteFiles newDelete()
    Create a new delete API to replace files in this table.
    AppendFiles newFastAppend()
    Create a new append API to add files to this table.
    OverwriteFiles newOverwrite()
    Create a new overwrite API to overwrite files by a filter expression.
    ReplacePartitions newReplacePartitions()
    Not recommended: Create a new replace partitions API to dynamically overwrite partitions in the table with new data.
    RewriteFiles newRewrite()
    Create a new rewrite API to replace files in this table.
    RowDelta newRowDelta()
    Create a new row-level delta API to remove or replace rows in existing data files.
    ReplaceSortOrder replaceSortOrder()
    Create a new ReplaceSortOrder to set a table sort order and commit the change.
    RewriteManifests rewriteManifests()
    Create a new rewrite manifests API to replace manifests for this table.
    TableMetadata startMetadata() 
    Table table()
    Return the Table that this transaction will update.
    java.lang.String tableName() 
    TableOperations underlyingOps() 
    UpdateLocation updateLocation()
    Create a new UpdateLocation to update table location.
    UpdateProperties updateProperties()
    Create a new UpdateProperties to update table properties.
    UpdateSchema updateSchema()
    Create a new UpdateSchema to alter the columns of this table.
    UpdatePartitionSpec updateSpec()
    Create a new UpdatePartitionSpec to alter the partition spec of this table.
    UpdateStatistics updateStatistics()
    Create a new update table statistics API to add or remove statistics @@ -336,6 +340,15 @@

    startMetadata

    public TableMetadata startMetadata()
    + + + +
      +
    • +

      currentMetadata

      +
      public TableMetadata currentMetadata()
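A brief, hedged sketch of the transaction workflow these methods belong to; the Table and DataFile are assumed inputs, and the property key is only an example:

```java
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Table;
import org.apache.iceberg.Transaction;

public class TransactionExample {
  static void appendAndConfigure(Table table, DataFile file) {
    Transaction txn = table.newTransaction();

    // Each pending update is committed to the transaction, not to the table.
    txn.newAppend().appendFile(file).commit();
    txn.updateProperties().set("write.metadata.compression-codec", "gzip").commit();

    // Nothing is visible to readers until the whole transaction commits.
    txn.commitTransaction();
  }
}
```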
      +
    • +
    diff --git a/javadoc/org/apache/iceberg/BatchScan.html b/javadoc/org/apache/iceberg/BatchScan.html index 7bc3458bf..1bbc85add 100644 --- a/javadoc/org/apache/iceberg/BatchScan.html +++ b/javadoc/org/apache/iceberg/BatchScan.html @@ -103,7 +103,7 @@

    Interface BatchScan

    All Known Implementing Classes:
    -
    PositionDeletesTable.PositionDeletesBatchScan
    +
    PositionDeletesTable.PositionDeletesBatchScan, SparkDistributedDataScan


    @@ -168,7 +168,7 @@

    Method Summary

    Methods inherited from interface org.apache.iceberg.Scan

    -caseSensitive, filter, filter, ignoreResiduals, includeColumnStats, isCaseSensitive, option, planFiles, planTasks, planWith, project, schema, select, select, splitLookback, splitOpenFileCost, targetSplitSize +caseSensitive, filter, filter, ignoreResiduals, includeColumnStats, isCaseSensitive, metricsReporter, option, planFiles, planTasks, planWith, project, schema, select, select, splitLookback, splitOpenFileCost, targetSplitSize diff --git a/javadoc/org/apache/iceberg/CatalogProperties.html b/javadoc/org/apache/iceberg/CatalogProperties.html index 907c09de8..5a60c52bf 100644 --- a/javadoc/org/apache/iceberg/CatalogProperties.html +++ b/javadoc/org/apache/iceberg/CatalogProperties.html @@ -127,29 +127,21 @@

    Field Summary

static java.lang.String AUTH_DEFAULT_REFRESH_ENABLED -
    Deprecated.  -
    Will be removed in 1.3.0; Use OAuth2Properties.TOKEN_REFRESH_ENABLED to control token - refresh behavior.
    -
    -
    AUTH_SESSION_TIMEOUT_MS 
static boolean AUTH_DEFAULT_REFRESH_ENABLED_DEFAULT -
    Deprecated.  -
    Will be removed in 1.3.0; Use OAuth2Properties.TOKEN_REFRESH_ENABLED_DEFAULT to control - default token refresh behavior.
    -
    -
    static longAUTH_SESSION_TIMEOUT_MS_DEFAULT 
static java.lang.String AUTH_SESSION_TIMEOUT_MS CACHE_CASE_SENSITIVE +
    Controls whether the caching catalog will cache table entries using case sensitive keys.
    +
    static longAUTH_SESSION_TIMEOUT_MS_DEFAULT static booleanCACHE_CASE_SENSITIVE_DEFAULT 
    static java.lang.String
    static java.lang.StringCLIENT_POOL_SIZE CLIENT_POOL_CACHE_KEYS +
    A comma separated list of elements used, in addition to the URI, to compose the key of + the client pool cache.
    +
    static java.lang.StringCLIENT_POOL_SIZE 
    static int CLIENT_POOL_SIZE_DEFAULT 
    static java.lang.String FILE_IO_IMPL 
    static java.lang.String IO_MANIFEST_CACHE_ENABLED
    Controls whether to use caching during manifest reads or not.
    static boolean IO_MANIFEST_CACHE_ENABLED_DEFAULT 
    static java.lang.String IO_MANIFEST_CACHE_EXPIRATION_INTERVAL_MS
    Controls the maximum duration for which an entry stays in the manifest cache.
    static long IO_MANIFEST_CACHE_EXPIRATION_INTERVAL_MS_DEFAULT 
    static java.lang.String IO_MANIFEST_CACHE_MAX_CONTENT_LENGTH
    Controls the maximum length of file to be considered for caching.
    static long IO_MANIFEST_CACHE_MAX_CONTENT_LENGTH_DEFAULT 
    static java.lang.String IO_MANIFEST_CACHE_MAX_TOTAL_BYTES
    Controls the maximum total amount of bytes to cache in manifest cache.
    static long IO_MANIFEST_CACHE_MAX_TOTAL_BYTES_DEFAULT 
    static java.lang.String LOCK_ACQUIRE_INTERVAL_MS 
    static long LOCK_ACQUIRE_INTERVAL_MS_DEFAULT 
    static java.lang.String LOCK_ACQUIRE_TIMEOUT_MS 
    static long LOCK_ACQUIRE_TIMEOUT_MS_DEFAULT 
    static java.lang.String LOCK_HEARTBEAT_INTERVAL_MS 
    static long LOCK_HEARTBEAT_INTERVAL_MS_DEFAULT 
    static java.lang.String LOCK_HEARTBEAT_THREADS 
    static int LOCK_HEARTBEAT_THREADS_DEFAULT 
    static java.lang.String LOCK_HEARTBEAT_TIMEOUT_MS 
    static long LOCK_HEARTBEAT_TIMEOUT_MS_DEFAULT 
    static java.lang.String LOCK_IMPL 
    static java.lang.String LOCK_TABLE 
    static java.lang.String METRICS_REPORTER_IMPL 
    static java.lang.String TABLE_DEFAULT_PREFIX 
    static java.lang.String TABLE_OVERRIDE_PREFIX 
    static java.lang.String URI 
    static java.lang.String USER 
    static java.lang.String WAREHOUSE_LOCATION 
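The properties above are plain string keys passed to a catalog at load time. A hedged sketch of wiring them up (the Hive catalog class, metastore URI, and the "ugi" cache-key value are assumptions for illustration, and the Hadoop Configuration implies a Hadoop dependency):

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.catalog.Catalog;

public class CatalogConfigExample {
  static Catalog load() {
    Map<String, String> props = new HashMap<>();
    props.put(CatalogProperties.URI, "thrift://metastore:9083");            // assumed metastore URI
    props.put(CatalogProperties.WAREHOUSE_LOCATION, "s3://bucket/warehouse");
    props.put(CatalogProperties.CACHE_CASE_SENSITIVE, "false");
    props.put(CatalogProperties.CLIENT_POOL_CACHE_KEYS, "ugi");             // assumed cache-key element

    // loadCatalog instantiates the implementation reflectively and calls initialize().
    return CatalogUtil.loadCatalog(
        "org.apache.iceberg.hive.HiveCatalog", "prod", props, new Configuration());
  }
}
```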
static void deleteFiles(FileIO io, + java.lang.Iterable<java.lang.String> files, + java.lang.String type, + boolean concurrent) +
    Helper to delete files.
    +
    static void dropTableData(FileIO io, TableMetadata metadata)
    Drops all data and metadata files referenced by TableMetadata.
    static Catalog loadCatalog(java.lang.String impl, java.lang.String catalogName, @@ -205,7 +214,7 @@

    Method Summary

    Load a custom catalog implementation.
    static FileIO loadFileIO(java.lang.String impl, java.util.Map<java.lang.String,java.lang.String> properties, @@ -213,7 +222,7 @@

    Method Summary

    Load a custom FileIO implementation.
    static MetricsReporter loadMetricsReporter(java.util.Map<java.lang.String,java.lang.String> properties)
    Load a custom MetricsReporter implementation.
    @@ -367,6 +376,26 @@

    dropTableData

    + + + +
      +
    • +

      deleteFiles

      +
      public static void deleteFiles(FileIO io,
      +                               java.lang.Iterable<java.lang.String> files,
      +                               java.lang.String type,
      +                               boolean concurrent)
      +
      Helper to delete files. Bulk deletion is used if possible.
      +
      +
      Parameters:
      +
      io - FileIO for deletes
      +
      files - files to delete
      +
      type - type of files being deleted
      +
      concurrent - controls concurrent deletion. Only applicable for non-bulk FileIO
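A minimal usage sketch of the deleteFiles helper documented above; the FileIO instance and the orphaned paths are assumptions:

```java
import java.util.Arrays;
import java.util.List;

import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.io.FileIO;

public class DeleteFilesExample {
  static void cleanUp(FileIO io) {
    // Hypothetical paths; in practice these would come from a metadata scan.
    List<String> orphaned = Arrays.asList(
        "s3://bucket/warehouse/db/tbl/data/00000-0-old.parquet",
        "s3://bucket/warehouse/db/tbl/data/00001-0-old.parquet");

    // Uses bulk deletion when the FileIO supports it; otherwise "concurrent" enables parallel deletes.
    CatalogUtil.deleteFiles(io, orphaned, "data", true);
  }
}
```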
      +
      +
    • +
    diff --git a/javadoc/org/apache/iceberg/ClientPoolImpl.html b/javadoc/org/apache/iceberg/ClientPoolImpl.html index 1062dab45..532113d1e 100644 --- a/javadoc/org/apache/iceberg/ClientPoolImpl.html +++ b/javadoc/org/apache/iceberg/ClientPoolImpl.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":6,"i2":10,"i3":6,"i4":10,"i5":6,"i6":10,"i7":10}; +var methods = {"i0":10,"i1":6,"i2":10,"i3":10,"i4":6,"i5":10,"i6":6,"i7":10,"i8":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -179,26 +179,30 @@

    Method Summary

    close(C client) 
boolean isClosed() 
    protected boolean isConnectionException(java.lang.Exception exc) 
    protected abstract C newClient() 
    int poolSize() 
    protected abstract C reconnect(C client) 
    <R> R run(ClientPool.Action<R,C,E> action) 
    <R> R run(ClientPool.Action<R,C,E> action, boolean retry) 
default java.lang.Long dataSequenceNumber() +
    Returns the data sequence number of the file.
    +
    java.util.List<java.lang.Integer> equalityFieldIds()
    Returns the set of field IDs used for equality comparison, in equality delete files.
default java.lang.Long fileSequenceNumber() +
    Returns the file sequence number.
    +
    long fileSizeInBytes()
    Returns the file size in bytes.
    FileFormat format()
    Returns format of the file.
    java.nio.ByteBuffer keyMetadata()
    Returns metadata about how this file is encrypted, or null if the file is stored in plain text.
    java.util.Map<java.lang.Integer,java.nio.ByteBuffer> lowerBounds()
    Returns if collected, map from column ID to value lower bounds, null otherwise.
    java.util.Map<java.lang.Integer,java.lang.Long> nanValueCounts()
    Returns if collected, map from column ID to its NaN value count, null otherwise.
    java.util.Map<java.lang.Integer,java.lang.Long> nullValueCounts()
    Returns if collected, map from column ID to its null value count, null otherwise.
    StructLike partition()
    Returns partition for this file as a StructLike.
    java.lang.CharSequence path()
    Returns fully qualified path to the file, suitable for constructing a Hadoop Path.
    java.lang.Long pos()
    Returns the ordinal position of the file in a manifest, or null if it was not read from a manifest.
    long recordCount()
    Returns the number of top-level records in the file.
    default java.lang.Integer sortOrderId()
    Returns the sort order id of this file, which describes how the file is ordered.
    int specId()
    Returns id of the partition spec used for partition metadata.
    java.util.List<java.lang.Long> splitOffsets()
    Returns list of recommended split locations, if applicable, null otherwise.
    java.util.Map<java.lang.Integer,java.nio.ByteBuffer> upperBounds()
    Returns if collected, map from column ID to value upper bounds, null otherwise.
    java.util.Map<java.lang.Integer,java.lang.Long> valueCounts() -
    Returns if collected, map from column ID to the count of its non-null values, null otherwise.
    +
    Returns if collected, map from column ID to the count of its values (including null and NaN + values), null otherwise.
    @@ -371,7 +384,8 @@

    columnSizes

  • valueCounts

    java.util.Map<java.lang.Integer,java.lang.Long> valueCounts()
    -
    Returns if collected, map from column ID to the count of its non-null values, null otherwise.
    +
    Returns if collected, map from column ID to the count of its values (including null and NaN + values), null otherwise.
  • @@ -468,6 +482,46 @@

    sortOrderId

    they share the same sort order id. +
    + + + + + + + diff --git a/javadoc/org/apache/iceberg/DataFile.html b/javadoc/org/apache/iceberg/DataFile.html index 0a5688b88..e025b1245 100644 --- a/javadoc/org/apache/iceberg/DataFile.html +++ b/javadoc/org/apache/iceberg/DataFile.html @@ -241,7 +241,7 @@

    Method Summary

    Methods inherited from interface org.apache.iceberg.ContentFile

    -columnSizes, copy, copy, copyWithoutStats, fileSizeInBytes, format, keyMetadata, lowerBounds, nanValueCounts, nullValueCounts, partition, path, pos, recordCount, sortOrderId, specId, splitOffsets, upperBounds, valueCounts +columnSizes, copy, copy, copyWithoutStats, dataSequenceNumber, fileSequenceNumber, fileSizeInBytes, format, keyMetadata, lowerBounds, nanValueCounts, nullValueCounts, partition, path, pos, recordCount, sortOrderId, specId, splitOffsets, upperBounds, valueCounts diff --git a/javadoc/org/apache/iceberg/DataFiles.Builder.html b/javadoc/org/apache/iceberg/DataFiles.Builder.html index b08f84038..1bd4fecf0 100644 --- a/javadoc/org/apache/iceberg/DataFiles.Builder.html +++ b/javadoc/org/apache/iceberg/DataFiles.Builder.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -174,50 +174,58 @@

    Method Summary

    DataFiles.Builder -withFileSizeInBytes(long newFileSizeInBytes)  +withEqualityFieldIds(java.util.List<java.lang.Integer> equalityIds)  DataFiles.Builder -withFormat(FileFormat newFormat)  +withFileSizeInBytes(long newFileSizeInBytes)  DataFiles.Builder -withFormat(java.lang.String newFormat)  +withFormat(FileFormat newFormat)  DataFiles.Builder -withInputFile(InputFile file)  +withFormat(java.lang.String newFormat)  DataFiles.Builder -withMetrics(Metrics metrics)  +withInputFile(InputFile file)  DataFiles.Builder -withPartition(StructLike newPartition)  +withMetrics(Metrics metrics)  DataFiles.Builder -withPartitionPath(java.lang.String newPartitionPath)  +withPartition(StructLike newPartition)  DataFiles.Builder -withPath(java.lang.String newFilePath)  +withPartitionPath(java.lang.String newPartitionPath)  DataFiles.Builder -withRecordCount(long newRecordCount)  +withPartitionValues(java.util.List<java.lang.String> partitionValues)  DataFiles.Builder -withSortOrder(SortOrder newSortOrder)  +withPath(java.lang.String newFilePath)  DataFiles.Builder -withSplitOffsets(java.util.List<java.lang.Long> offsets)  +withRecordCount(long newRecordCount)  DataFiles.Builder +withSortOrder(SortOrder newSortOrder)  + + +DataFiles.Builder +withSplitOffsets(java.util.List<java.lang.Long> offsets)  + + +DataFiles.Builder withStatus(org.apache.hadoop.fs.FileStatus stat)  @@ -367,6 +375,15 @@

    withPartitionPath

    public DataFiles.Builder withPartitionPath(java.lang.String newPartitionPath)
    + + + + @@ -385,6 +402,15 @@

    withSplitOffsets

    public DataFiles.Builder withSplitOffsets(java.util.List<java.lang.Long> offsets)
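A hedged sketch of assembling a DataFile with this builder; the path, partition path, and metric values are hypothetical and would normally come from the file writer, and the sketch assumes a spec with an identity "day" partition field:

```java
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.PartitionSpec;

public class DataFileBuilderExample {
  static DataFile describeFile(PartitionSpec spec) {
    // Hypothetical values for illustration only.
    return DataFiles.builder(spec)
        .withPath("s3://bucket/warehouse/db/tbl/data/day=2023-01-01/00000-0.parquet")
        .withFormat(FileFormat.PARQUET)
        .withPartitionPath("day=2023-01-01")
        .withFileSizeInBytes(64 * 1024 * 1024)
        .withRecordCount(1_000_000)
        .build();
  }
}
```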
    + + + + diff --git a/javadoc/org/apache/iceberg/DataFilesTable.DataFilesTableScan.html b/javadoc/org/apache/iceberg/DataFilesTable.DataFilesTableScan.html index 5b39f8453..2ffef6cd2 100644 --- a/javadoc/org/apache/iceberg/DataFilesTable.DataFilesTableScan.html +++ b/javadoc/org/apache/iceberg/DataFilesTable.DataFilesTableScan.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":42,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":42,"i28":10,"i29":10,"i30":10}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -148,6 +148,18 @@

    Field Summary

    protected static java.util.List<java.lang.String> DELETE_SCAN_WITH_STATS_COLUMNS  + +protected static boolean +PLAN_SCANS_WITH_WORKER_POOL  + + +protected static java.util.List<java.lang.String> +SCAN_COLUMNS  + + +protected static java.util.List<java.lang.String> +SCAN_WITH_STATS_COLUMNS  + @@ -158,7 +170,7 @@

    Field Summary

    Method Summary

    - + @@ -219,36 +231,35 @@

    Method Summary

    + + + + - + - - - + + - + - + - + - + - + - + - + - + + + + + - + - + - + - + - + + + + + - + - + - - - - - + - + - + + + + + + + + + + + + +
    All Methods Instance Methods Concrete Methods Deprecated Methods All Methods Instance Methods Concrete Methods 
    Modifier and Type Method and Description
    protected FileIOio() 
    boolean isCaseSensitive()
    Returns whether this scan is case-sensitive with respect to column names.
    protected CloseableIterable<ManifestFile> manifests()
    Returns an iterable of manifest files to explore for this files metadata table scan
    protected ThisTnewRefinedScan(TableOperations ignored, - Table newTable, - Schema newSchema, - org.apache.iceberg.TableScanContext newContext) -
    Deprecated.  -
    will be removed in 1.3.0; use newRefinedScan(Table, Schema, TableScanContext) - instead.
    -
    +
    ThisTmetricsReporter(MetricsReporter reporter) +
    Create a new scan that will report scan metrics to the provided reporter in addition to + reporters maintained by the scan.
    protected TableScan newRefinedScan(Table table, Schema schema, org.apache.iceberg.TableScanContext context) 
    ThisT option(java.lang.String property, java.lang.String value) @@ -256,92 +267,92 @@

    Method Summary

    behavior based on the incoming pair.
    protected java.util.Map<java.lang.String,java.lang.String> options() 
    protected java.util.concurrent.ExecutorService planExecutor() 
    CloseableIterable<CombinedScanTask> planTasks()
    Plan balanced task groups for this scan by splitting large and combining small tasks.
    ThisT planWith(java.util.concurrent.ExecutorService executorService)
    Create a new scan to use a particular executor to plan.
    ThisT project(Schema projectedSchema)
    Create a new scan from this with the schema as its projection.
    protected ExpressionresidualFilter() 
    protected java.util.List<java.lang.String> scanColumns() 
    Schema schema()
    Returns this scan's projection Schema.
    ThisT select(java.util.Collection<java.lang.String> columns)
    Create a new scan from this that will read the given data columns.
    protected boolean shouldIgnoreResiduals() 
    protected boolean shouldPlanWithExecutor() 
    protected booleanshouldReturnColumnStats() 
    int splitLookback()
    Returns the split lookback for this scan.
    long splitOpenFileCost()
    Returns the split open file cost for this scan.
    Table table() 
    protected TableOperationstableOps() -
    Deprecated.  -
    will be removed in 1.3.0; avoid using TableOperations for scans or use BaseTable
    -
    -
    protected Schema tableSchema() 
    protected MetadataTableType tableType()
    Type of scan being performed, such as MetadataTableType.ALL_DATA_FILES when scanning a table's AllDataFilesTable.
    long targetSplitSize()
    Returns the target split size for this scan.
    @@ -353,7 +364,7 @@

    Method Summary

    Methods inherited from class org.apache.iceberg.SnapshotScan

    -asOfTime, planFiles, scanMetrics, snapshot, snapshotId, toString, useRef, useSnapshot +asOfTime, planFiles, scanMetrics, snapshot, snapshotId, toString, useRef, useSnapshot, useSnapshotSchema @@ -390,6 +401,24 @@

    Methods inherited from interface org.apache.iceberg. + + +
      +
    • +

      SCAN_COLUMNS

      +
      protected static final java.util.List<java.lang.String> SCAN_COLUMNS
      +
    • +
    + + + +
      +
    • +

      SCAN_WITH_STATS_COLUMNS

      +
      protected static final java.util.List<java.lang.String> SCAN_WITH_STATS_COLUMNS
      +
    • +
    @@ -402,12 +431,21 @@

    DELETE_SCAN_COLUMNS

    -
      +
      • DELETE_SCAN_WITH_STATS_COLUMNS

        protected static final java.util.List<java.lang.String> DELETE_SCAN_WITH_STATS_COLUMNS
      + + + +
        +
      • +

        PLAN_SCANS_WITH_WORKER_POOL

        +
        protected static final boolean PLAN_SCANS_WITH_WORKER_POOL
        +
      • +
    @@ -542,24 +580,22 @@

    planTasks

    - +
    • -

      tableOps

      -
      @Deprecated
      -protected TableOperations tableOps()
      -
      Deprecated. will be removed in 1.3.0; avoid using TableOperations for scans or use BaseTable
      +

      table

      +
      public Table table()
    - +
    • -

      table

      -
      public Table table()
      +

      io

      +
      protected FileIO io()
    @@ -598,6 +634,15 @@

    scanColumns

    protected java.util.List<java.lang.String> scanColumns()
    +
    + + +
      +
    • +

      shouldReturnColumnStats

      +
      protected boolean shouldReturnColumnStats()
      +
    • +
    @@ -607,37 +652,31 @@

    shouldIgnoreResiduals

    protected boolean shouldIgnoreResiduals()
    - +
    • -

      shouldPlanWithExecutor

      -
      protected boolean shouldPlanWithExecutor()
      +

      residualFilter

      +
      protected Expression residualFilter()
    - +
    • -

      planExecutor

      -
      protected java.util.concurrent.ExecutorService planExecutor()
      +

      shouldPlanWithExecutor

      +
      protected boolean shouldPlanWithExecutor()
    - +
    • -

      newRefinedScan

      -
      @Deprecated
      -protected ThisT newRefinedScan(TableOperations ignored,
      -                                           Table newTable,
      -                                           Schema newSchema,
      -                                           org.apache.iceberg.TableScanContext newContext)
      -
      Deprecated. will be removed in 1.3.0; use newRefinedScan(Table, Schema, TableScanContext) - instead.
      +

      planExecutor

      +
      protected java.util.concurrent.ExecutorService planExecutor()
    @@ -872,7 +911,7 @@

    splitLookback

    -
      +
      • splitOpenFileCost

        public long splitOpenFileCost()
        @@ -884,6 +923,22 @@

        splitOpenFileCost

      + + + +
        +
      • +

        metricsReporter

        +
        public ThisT metricsReporter(MetricsReporter reporter)
        +
        Description copied from interface: Scan
        +
        Create a new scan that will report scan metrics to the provided reporter in addition to + reporters maintained by the scan.
        +
        +
        Specified by:
        +
        metricsReporter in interface Scan<ThisT,T extends ScanTask,G extends ScanTaskGroup<T>>
        +
        +
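As an informal illustration of the reporter hook described above (a loaded Table named table is assumed and the "id" column is a placeholder; MetricsReporter is treated as a single-method interface, so a lambda is used):

    import org.apache.iceberg.Table;
    import org.apache.iceberg.TableScan;
    import org.apache.iceberg.expressions.Expressions;
    import org.apache.iceberg.metrics.MetricsReporter;

    class ScanReporterSketch {
      static TableScan planWithReporter(Table table) {
        // Ad-hoc reporter that simply prints each scan report it receives.
        MetricsReporter reporter = report -> System.out.println("scan report: " + report);
        return table.newScan()
            .filter(Expressions.greaterThanOrEqual("id", 100))
            .metricsReporter(reporter);
      }
    }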
      • +
    diff --git a/javadoc/org/apache/iceberg/DataTableScan.html b/javadoc/org/apache/iceberg/DataTableScan.html index 22e3248c1..7eb9c01ac 100644 --- a/javadoc/org/apache/iceberg/DataTableScan.html +++ b/javadoc/org/apache/iceberg/DataTableScan.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":42,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":42,"i27":10,"i28":10,"i29":10}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -144,6 +144,18 @@

    Field Summary

    protected static java.util.List<java.lang.String> DELETE_SCAN_WITH_STATS_COLUMNS 
    protected static booleanPLAN_SCANS_WITH_WORKER_POOL 
    protected static java.util.List<java.lang.String>SCAN_COLUMNS 
    protected static java.util.List<java.lang.String>SCAN_WITH_STATS_COLUMNS 
    @@ -160,28 +172,6 @@

    Constructor Summary

    Constructor and Description -  -DataTableScan(TableOperations ignored, - Table table) -
    Deprecated.  -
    will be removed in 1.3.0; use DataTableScan(Table, Schema, TableScanContext) - instead.
    -
    - - - -protected -DataTableScan(TableOperations ignored, - Table table, - Schema schema, - org.apache.iceberg.TableScanContext context) -
    Deprecated.  -
    will be removed in 1.3.0; use DataTableScan(Table, Schema, TableScanContext) - instead.
    -
    - - - protected DataTableScan(Table table, Schema schema, @@ -197,7 +187,7 @@

    Constructor Summary

    Method Summary

    - + @@ -258,30 +248,29 @@

    Method Summary

    + + + + - - - + + - + - + - + - + - + - + - + - + + + + + - + - + - + - + - + + + + + - + - + - - - - - + - + - - - + + +
    All Methods Instance Methods Concrete Methods Deprecated Methods All Methods Instance Methods Concrete Methods 
    Modifier and Type Method and Description
    protected FileIOio() 
    boolean isCaseSensitive()
    Returns whether this scan is case-sensitive with respect to column names.
    protected ThisTnewRefinedScan(TableOperations ignored, - Table newTable, - Schema newSchema, - org.apache.iceberg.TableScanContext newContext) -
    Deprecated.  -
    will be removed in 1.3.0; use newRefinedScan(Table, Schema, TableScanContext) - instead.
    -
    +
    ThisTmetricsReporter(MetricsReporter reporter) +
    Create a new scan that will report scan metrics to the provided reporter in addition to + reporters maintained by the scan.
    protected TableScan newRefinedScan(Table table, Schema schema, org.apache.iceberg.TableScanContext context) 
    ThisT option(java.lang.String property, java.lang.String value) @@ -289,96 +278,93 @@

    Method Summary

    behavior based on the incoming pair.
    protected java.util.Map<java.lang.String,java.lang.String> options() 
    protected java.util.concurrent.ExecutorService planExecutor() 
    CloseableIterable<CombinedScanTask> planTasks()
    Plan balanced task groups for this scan by splitting large and combining small tasks.
    ThisT planWith(java.util.concurrent.ExecutorService executorService)
    Create a new scan to use a particular executor to plan.
    ThisT project(Schema projectedSchema)
    Create a new scan from this with the schema as its projection.
    protected ExpressionresidualFilter() 
    protected java.util.List<java.lang.String> scanColumns() 
    Schema schema()
    Returns this scan's projection Schema.
    ThisT select(java.util.Collection<java.lang.String> columns)
    Create a new scan from this that will read the given data columns.
    protected boolean shouldIgnoreResiduals() 
    protected boolean shouldPlanWithExecutor() 
    protected booleanshouldReturnColumnStats() 
    int splitLookback()
    Returns the split lookback for this scan.
    long splitOpenFileCost()
    Returns the split open file cost for this scan.
    Table table() 
    protected TableOperationstableOps() -
    Deprecated.  -
    will be removed in 1.3.0; avoid using TableOperations for scans or use BaseTable
    -
    -
    protected Schema tableSchema() 
    long targetSplitSize()
    Returns the target split size for this scan.
    TableScanuseSnapshot(long scanSnapshotId) -
    Create a new TableScan from this scan's configuration that will use the given snapshot - by ID.
    -
    protected booleanuseSnapshotSchema() 
    @@ -423,6 +409,24 @@

    Methods inherited from interface org.apache.iceberg. + + + + + + + @@ -435,12 +439,21 @@

    DELETE_SCAN_COLUMNS

    - @@ -449,34 +462,6 @@

    DELETE_SCAN_WITH_STATS_COLUMNS

    Constructor Detail

    - - - - - - - - @@ -540,25 +525,16 @@

    appendsAfter

    - + @@ -606,24 +582,22 @@

    planTasks

    - + - + @@ -662,6 +636,15 @@

    scanColumns

    protected java.util.List<java.lang.String> scanColumns()
    +
    + + + @@ -671,37 +654,31 @@

    shouldIgnoreResiduals

    protected boolean shouldIgnoreResiduals()
    - + - + - + @@ -951,7 +928,7 @@

    splitLookback

    - diff --git a/javadoc/org/apache/iceberg/DataTask.html b/javadoc/org/apache/iceberg/DataTask.html index c1eb65c59..16eabf331 100644 --- a/javadoc/org/apache/iceberg/DataTask.html +++ b/javadoc/org/apache/iceberg/DataTask.html @@ -148,7 +148,7 @@

    Method Summary

    Methods inherited from interface org.apache.iceberg.FileScanTask

    -asFileScanTask, deletes, filesCount, isFileScanTask, sizeBytes +asFileScanTask, deletes, filesCount, isFileScanTask, schema, sizeBytes diff --git a/javadoc/org/apache/iceberg/DeleteFiles.html b/javadoc/org/apache/iceberg/DeleteFiles.html index f809ae0f8..19e9400c2 100644 --- a/javadoc/org/apache/iceberg/DeleteFiles.html +++ b/javadoc/org/apache/iceberg/DeleteFiles.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":18,"i3":6}; +var methods = {"i0":6,"i1":6,"i2":18,"i3":6,"i4":18}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -158,6 +158,13 @@

    Method Summary

    Delete files that match an Expression on data rows from the table.
    + +default DeleteFiles +validateFilesExist() +
    Enables validation that any files that are part of the deletion still exist when committing the + operation.
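A brief sketch of how this validation might be combined with a row-filter delete (the table handle and the "ds" predicate are assumptions for illustration):

    import org.apache.iceberg.Table;
    import org.apache.iceberg.expressions.Expressions;

    class DeleteWithValidationSketch {
      static void dropOldPartition(Table table) {
        table.newDelete()
            .deleteFromRowFilter(Expressions.equal("ds", "2023-01-01"))
            .validateFilesExist() // fail the commit if a matched file has already been removed
            .commit();
      }
    }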
    + + @@ -158,7 +170,7 @@

    Field Summary

    Method Summary

    - + @@ -219,36 +231,35 @@

    Method Summary

    + + + + - + - - - + + - + - + - + - + - + - + - + - + + + + + - + - + - + - + - + + + + + - + - + - - - - - + - + - + + + + +
    All Methods Instance Methods Concrete Methods Deprecated Methods All Methods Instance Methods Concrete Methods 
    Modifier and Type Method and Description
    protected FileIOio() 
    boolean isCaseSensitive()
    Returns whether this scan is case-sensitive with respect to column names.
    protected CloseableIterable<ManifestFile> manifests()
    Returns an iterable of manifest files to explore for this files metadata table scan
    protected ThisTnewRefinedScan(TableOperations ignored, - Table newTable, - Schema newSchema, - org.apache.iceberg.TableScanContext newContext) -
    Deprecated.  -
    will be removed in 1.3.0; use newRefinedScan(Table, Schema, TableScanContext) - instead.
    -
    +
    ThisTmetricsReporter(MetricsReporter reporter) +
    Create a new scan that will report scan metrics to the provided reporter in addition to + reporters maintained by the scan.
    protected TableScan newRefinedScan(Table table, Schema schema, org.apache.iceberg.TableScanContext context) 
    ThisT option(java.lang.String property, java.lang.String value) @@ -256,92 +267,92 @@

    Method Summary

    behavior based on the incoming pair.
    protected java.util.Map<java.lang.String,java.lang.String> options() 
    protected java.util.concurrent.ExecutorService planExecutor() 
    CloseableIterable<CombinedScanTask> planTasks()
    Plan balanced task groups for this scan by splitting large and combining small tasks.
    ThisT planWith(java.util.concurrent.ExecutorService executorService)
    Create a new scan to use a particular executor to plan.
    ThisT project(Schema projectedSchema)
    Create a new scan from this with the schema as its projection.
    protected ExpressionresidualFilter() 
    protected java.util.List<java.lang.String> scanColumns() 
    Schema schema()
    Returns this scan's projection Schema.
    ThisT select(java.util.Collection<java.lang.String> columns)
    Create a new scan from this that will read the given data columns.
    protected boolean shouldIgnoreResiduals() 
    protected boolean shouldPlanWithExecutor() 
    protected booleanshouldReturnColumnStats() 
    int splitLookback()
    Returns the split lookback for this scan.
    long splitOpenFileCost()
    Returns the split open file cost for this scan.
    Table table() 
    protected TableOperationstableOps() -
    Deprecated.  -
    will be removed in 1.3.0; avoid using TableOperations for scans or use BaseTable
    -
    -
    protected Schema tableSchema() 
    protected MetadataTableType tableType()
    Type of scan being performed, such as MetadataTableType.ALL_DATA_FILES when scanning a table's AllDataFilesTable.
    long targetSplitSize()
    Returns the target split size for this scan.
    @@ -353,7 +364,7 @@

    Method Summary

    Methods inherited from class org.apache.iceberg.SnapshotScan

    -asOfTime, planFiles, scanMetrics, snapshot, snapshotId, toString, useRef, useSnapshot +asOfTime, planFiles, scanMetrics, snapshot, snapshotId, toString, useRef, useSnapshot, useSnapshotSchema @@ -390,6 +401,24 @@

    Methods inherited from interface org.apache.iceberg. + + +
      +
    • +

      SCAN_COLUMNS

      +
      protected static final java.util.List<java.lang.String> SCAN_COLUMNS
      +
    • +
    + + + +
      +
    • +

      SCAN_WITH_STATS_COLUMNS

      +
      protected static final java.util.List<java.lang.String> SCAN_WITH_STATS_COLUMNS
      +
    • +
    @@ -402,12 +431,21 @@

    DELETE_SCAN_COLUMNS

    -
      +
      • DELETE_SCAN_WITH_STATS_COLUMNS

        protected static final java.util.List<java.lang.String> DELETE_SCAN_WITH_STATS_COLUMNS
      + + + +
        +
      • +

        PLAN_SCANS_WITH_WORKER_POOL

        +
        protected static final boolean PLAN_SCANS_WITH_WORKER_POOL
        +
      • +
    @@ -542,24 +580,22 @@

    planTasks

    - +
    • -

      tableOps

      -
      @Deprecated
      -protected TableOperations tableOps()
      -
      Deprecated. will be removed in 1.3.0; avoid using TableOperations for scans or use BaseTable
      +

      table

      +
      public Table table()
    - +
    • -

      table

      -
      public Table table()
      +

      io

      +
      protected FileIO io()
    @@ -598,6 +634,15 @@

    scanColumns

    protected java.util.List<java.lang.String> scanColumns()
    +
    + + +
      +
    • +

      shouldReturnColumnStats

      +
      protected boolean shouldReturnColumnStats()
      +
    • +
    @@ -607,37 +652,31 @@

    shouldIgnoreResiduals

    protected boolean shouldIgnoreResiduals()
    - +
    • -

      shouldPlanWithExecutor

      -
      protected boolean shouldPlanWithExecutor()
      +

      residualFilter

      +
      protected Expression residualFilter()
    - +
    • -

      planExecutor

      -
      protected java.util.concurrent.ExecutorService planExecutor()
      +

      shouldPlanWithExecutor

      +
      protected boolean shouldPlanWithExecutor()
    - +
    • -

      newRefinedScan

      -
      @Deprecated
      -protected ThisT newRefinedScan(TableOperations ignored,
      -                                           Table newTable,
      -                                           Schema newSchema,
      -                                           org.apache.iceberg.TableScanContext newContext)
      -
      Deprecated. will be removed in 1.3.0; use newRefinedScan(Table, Schema, TableScanContext) - instead.
      +

      planExecutor

      +
      protected java.util.concurrent.ExecutorService planExecutor()
    @@ -872,7 +911,7 @@

    splitLookback

    -
      +
      • splitOpenFileCost

        public long splitOpenFileCost()
        @@ -884,6 +923,22 @@

        splitOpenFileCost

      + + + +
        +
      • +

        metricsReporter

        +
        public ThisT metricsReporter(MetricsReporter reporter)
        +
        Description copied from interface: Scan
        +
        Create a new scan that will report scan metrics to the provided reporter in addition to + reporters maintained by the scan.
        +
        +
        Specified by:
        +
        metricsReporter in interface Scan<ThisT,T extends ScanTask,G extends ScanTaskGroup<T>>
        +
        +
      • +
    diff --git a/javadoc/org/apache/iceberg/FileMetadata.Builder.html b/javadoc/org/apache/iceberg/FileMetadata.Builder.html index 28b650ea4..df80dfbd7 100644 --- a/javadoc/org/apache/iceberg/FileMetadata.Builder.html +++ b/javadoc/org/apache/iceberg/FileMetadata.Builder.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -205,6 +205,10 @@

    Method Summary

    FileMetadata.BuilderwithSplitOffsets(java.util.List<java.lang.Long> offsets) 
    FileMetadata.Builder withStatus(org.apache.hadoop.fs.FileStatus stat) 
    @@ -364,6 +368,15 @@

    withMetrics

    public FileMetadata.Builder withMetrics(Metrics metrics)
    + + + + diff --git a/javadoc/org/apache/iceberg/FileScanTask.html b/javadoc/org/apache/iceberg/FileScanTask.html index 1f0cd7466..ad63bc440 100644 --- a/javadoc/org/apache/iceberg/FileScanTask.html +++ b/javadoc/org/apache/iceberg/FileScanTask.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":18,"i1":6,"i2":18,"i3":18,"i4":18}; +var methods = {"i0":18,"i1":6,"i2":18,"i3":18,"i4":18,"i5":18}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -48,7 +48,7 @@ + + +
    + + + + + + + +
    + + + + diff --git a/javadoc/org/apache/iceberg/MetadataUpdate.SetDefaultPartitionSpec.html b/javadoc/org/apache/iceberg/MetadataUpdate.SetDefaultPartitionSpec.html index 23c9b8287..dcae5e819 100644 --- a/javadoc/org/apache/iceberg/MetadataUpdate.SetDefaultPartitionSpec.html +++ b/javadoc/org/apache/iceberg/MetadataUpdate.SetDefaultPartitionSpec.html @@ -47,7 +47,7 @@ @@ -146,28 +146,36 @@

    Nested Class Summary

    static class  -MetadataUpdate.AssignUUID  +MetadataUpdate.AddViewVersion  static class  -MetadataUpdate.RemoveProperties  +MetadataUpdate.AssignUUID  static class  -MetadataUpdate.RemoveSnapshot  +MetadataUpdate.RemoveProperties  static class  -MetadataUpdate.RemoveSnapshotRef  +MetadataUpdate.RemoveSnapshot  static class  -MetadataUpdate.RemoveStatistics  +MetadataUpdate.RemoveSnapshotRef  static class  +MetadataUpdate.RemoveStatistics  + + +static class  MetadataUpdate.SetCurrentSchema  + +static class  +MetadataUpdate.SetCurrentViewVersion  + static class  MetadataUpdate.SetDefaultPartitionSpec  @@ -206,15 +214,19 @@

    Nested Class Summary

    Method Summary

    - + - + + + + +
    All Methods Instance Methods Abstract Methods All Methods Instance Methods Default Methods 
    Modifier and Type Method and Description
    voiddefault void applyTo(TableMetadata.Builder metadataBuilder) 
    default voidapplyTo(ViewMetadata.Builder viewMetadataBuilder) 
    @@ -233,10 +245,19 @@

    Method Detail

    + + + + diff --git a/javadoc/org/apache/iceberg/MetricsUtil.html b/javadoc/org/apache/iceberg/MetricsUtil.html index 58d400d7f..811842321 100644 --- a/javadoc/org/apache/iceberg/MetricsUtil.html +++ b/javadoc/org/apache/iceberg/MetricsUtil.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":9,"i2":9,"i3":9}; +var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -185,6 +185,20 @@

    Method Summary

    Method and Description +static Metrics +copyWithoutFieldCounts(Metrics metrics, + java.util.Set<java.lang.Integer> excludedFieldIds) +
    Copies a metrics object without value, NULL and NaN counts for given fields.
    + + + +static Metrics +copyWithoutFieldCountsAndBounds(Metrics metrics, + java.util.Set<java.lang.Integer> excludedFieldIds) +
    Copies a metrics object without counts and bounds for given fields.
    + + + static java.util.Map<java.lang.Integer,java.lang.Long> createNanValueCounts(java.util.stream.Stream<FieldMetrics<?>> fieldMetrics, MetricsConfig metricsConfig, @@ -193,7 +207,7 @@

    Method Summary

    metrics config. - + static MetricsModes.MetricsMode metricsMode(Schema inputSchema, MetricsConfig metricsConfig, @@ -201,14 +215,14 @@

    Method Summary

    Extract MetricsMode for the given field id from metrics config.
    - + static Schema readableMetricsSchema(Schema dataTableSchema, Schema metadataTableSchema)
    Calculates a dynamic schema for readable_metrics to add to metadata tables.
    - + static MetricsUtil.ReadableMetricsStruct readableMetricsStruct(Schema schema, ContentFile<?> file, @@ -268,6 +282,40 @@

    READABLE_METRICS

    Method Detail

    + + + +
      +
    • +

      copyWithoutFieldCounts

      +
      public static Metrics copyWithoutFieldCounts(Metrics metrics,
      +                                             java.util.Set<java.lang.Integer> excludedFieldIds)
      +
      Copies a metrics object without value, NULL and NaN counts for given fields.
      +
      +
      Parameters:
      +
      excludedFieldIds - field IDs for which the counts must be dropped
      +
      Returns:
      +
      a new metrics object without counts for given fields
      +
      +
    • +
    + + + +
      +
    • +

      copyWithoutFieldCountsAndBounds

      +
      public static Metrics copyWithoutFieldCountsAndBounds(Metrics metrics,
      +                                                      java.util.Set<java.lang.Integer> excludedFieldIds)
      +
      Copies a metrics object without counts and bounds for given fields.
      +
      +
      Parameters:
      +
      excludedFieldIds - field IDs for which the counts and bounds must be dropped
      +
      Returns:
      +
      a new metrics object without lower and upper bounds for given fields
      +
      +
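A small sketch of how the two copy helpers above might be applied before re-registering a file, assuming the metrics object already exists and that field IDs 3 and 4 are the columns to trim (both values are placeholders):

    import java.util.Set;
    import org.apache.iceberg.Metrics;
    import org.apache.iceberg.MetricsUtil;

    class MetricsTrimSketch {
      static Metrics trim(Metrics metrics) {
        Set<Integer> excluded = Set.of(3, 4);
        // Drop value/NULL/NaN counts only ...
        Metrics withoutCounts = MetricsUtil.copyWithoutFieldCounts(metrics, excluded);
        // ... or additionally drop lower and upper bounds for the same fields.
        return MetricsUtil.copyWithoutFieldCountsAndBounds(withoutCounts, excluded);
      }
    }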
    • +
    diff --git a/javadoc/org/apache/iceberg/MicroBatches.MicroBatchBuilder.html b/javadoc/org/apache/iceberg/MicroBatches.MicroBatchBuilder.html index 51fbc845c..3d84d57ac 100644 --- a/javadoc/org/apache/iceberg/MicroBatches.MicroBatchBuilder.html +++ b/javadoc/org/apache/iceberg/MicroBatches.MicroBatchBuilder.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -142,6 +142,13 @@

    Method Summary

    boolean scanAllFiles)
      +MicroBatches.MicroBatch +generate(long startFileIndex, + long endFileIndex, + long targetSizeInBytes, + boolean scanAllFiles)  + + MicroBatches.MicroBatchBuilder specsById(java.util.Map<java.lang.Integer,PartitionSpec> specs)  @@ -188,10 +195,22 @@

    specsById

    +
      +
    • +

      generate

      +
      public MicroBatches.MicroBatch generate(long startFileIndex,
      +                                        long targetSizeInBytes,
      +                                        boolean scanAllFiles)
      +
    • +
    + + +
    + + diff --git a/javadoc/org/apache/iceberg/RewriteFiles.html b/javadoc/org/apache/iceberg/RewriteFiles.html index 2da471999..714cd469c 100644 --- a/javadoc/org/apache/iceberg/RewriteFiles.html +++ b/javadoc/org/apache/iceberg/RewriteFiles.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":18,"i1":6,"i2":6,"i3":6}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; +var methods = {"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":50,"i7":38,"i8":38,"i9":6}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -113,7 +113,10 @@

    Interface RewriteFiles

    When committing, these changes will be applied to the latest table snapshot. Commit conflicts will be resolved by applying the changes to the new latest snapshot and reattempting the commit. If any of the deleted files are no longer in the latest snapshot when reattempting, the commit - will throw a ValidationException. + will throw a ValidationException. + +

    Note that the new state of the table after each rewrite must be logically equivalent to the + original table state. @@ -127,36 +130,79 @@

    Interface RewriteFiles

    Method Summary

    - + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + + + + + - + - + - + - + - + - + - + - + - + - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + +
    All Methods Instance Methods Abstract Methods Default Methods All Methods Instance Methods Abstract Methods Default Methods Deprecated Methods 
    Modifier and Type Method and Description
    default RewriteFilesaddFile(DataFile dataFile) +
    Add a new data file.
    +
    default RewriteFilesaddFile(DeleteFile deleteFile) +
    Add a new delete file.
    +
    default RewriteFilesaddFile(DeleteFile deleteFile, + long dataSequenceNumber) +
    Add a new delete file with the given data sequence number.
    +
    default RewriteFilesdataSequenceNumber(long sequenceNumber) +
    Configure the data sequence number for this rewrite operation.
    +
    default RewriteFilesdeleteFile(DataFile dataFile) +
    Remove a data file from the current table state.
    +
    default RewriteFilesdeleteFile(DeleteFile deleteFile) +
    Remove a delete file from the table state.
    +
    default RewriteFiles rewriteFiles(java.util.Set<DataFile> filesToDelete, java.util.Set<DataFile> filesToAdd) -
    Add a rewrite that replaces one set of data files with another set that contains the same data.
    +
    Deprecated.  +
    since 1.3.0, will be removed in 2.0.0
    +
    RewriteFiles rewriteFiles(java.util.Set<DataFile> filesToDelete, java.util.Set<DataFile> filesToAdd, long sequenceNumber) -
    Add a rewrite that replaces one set of data files with another set that contains the same data.
    +
    Deprecated.  +
    since 1.3.0, will be removed in 2.0.0
    +
    RewriteFiles rewriteFiles(java.util.Set<DataFile> dataFilesToReplace, java.util.Set<DeleteFile> deleteFilesToReplace, java.util.Set<DataFile> dataFilesToAdd, java.util.Set<DeleteFile> deleteFilesToAdd) -
    Add a rewrite that replaces one set of files with another set that contains the same data.
    +
    Deprecated.  +
    since 1.3.0, will be removed in 2.0.0
    +
    RewriteFiles validateFromSnapshot(long snapshotId)
    Set the snapshot ID used in any reads for this operation.
    @@ -191,14 +237,140 @@

    Methods inherited from interface org.apache.iceberg. + + +
      +
    • +

      deleteFile

      +
      default RewriteFiles deleteFile(DataFile dataFile)
      +
      Remove a data file from the current table state. + +

      This rewrite operation may change the size or layout of the data files. When applicable, it + is also recommended to discard already deleted records while rewriting data files. However, the + set of live data records must never change.

      +
      +
      Parameters:
      +
      dataFile - a rewritten data file
      +
      Returns:
      +
      this for method chaining
      +
      +
    • +
    + + + +
      +
    • +

      deleteFile

      +
      default RewriteFiles deleteFile(DeleteFile deleteFile)
      +
      Remove a delete file from the table state. + +

      This rewrite operation may change the size or layout of the delete files. When applicable, + it is also recommended to discard delete records for files that are no longer part of the table + state. However, the set of applicable delete records must never change.

      +
      +
      Parameters:
      +
      deleteFile - a rewritten delete file
      +
      Returns:
      +
      this for method chaining
      +
      +
    • +
    + + + +
      +
    • +

      addFile

      +
      default RewriteFiles addFile(DataFile dataFile)
      +
      Add a new data file. + +

      This rewrite operation may change the size or layout of the data files. When applicable, it + is also recommended to discard already deleted records while rewriting data files. However, the + set of live data records must never change.

      +
      +
      Parameters:
      +
      dataFile - a new data file
      +
      Returns:
      +
      this for method chaining
      +
      +
    • +
    + + + +
      +
    • +

      addFile

      +
      default RewriteFiles addFile(DeleteFile deleteFile)
      +
      Add a new delete file. + +

      This rewrite operation may change the size or layout of the delete files. When applicable, + it is also recommended to discard delete records for files that are no longer part of the table + state. However, the set of applicable delete records must never change.

      +
      +
      Parameters:
      +
      deleteFile - a new delete file
      +
      Returns:
      +
      this for method chaining
      +
      +
    • +
    + + + +
      +
    • +

      addFile

      +
      default RewriteFiles addFile(DeleteFile deleteFile,
      +                             long dataSequenceNumber)
      +
      Add a new delete file with the given data sequence number. + +

      This rewrite operation may change the size or layout of the delete files. When applicable, + it is also recommended to discard delete records for files that are no longer part of the table + state. However, the set of applicable delete records must never change. + +

      To ensure equivalence in the set of applicable delete records, the sequence number of the + delete file must be the max sequence number of the delete files that it is replacing. Rewriting + equality deletes that belong to different sequence numbers is not allowed.

      +
      +
      Parameters:
      +
      deleteFile - a new delete file
      +
      dataSequenceNumber - data sequence number to append on the file
      +
      Returns:
      +
      this for method chaining
      +
      +
    • +
    + + + +
      +
    • +

      dataSequenceNumber

      +
      default RewriteFiles dataSequenceNumber(long sequenceNumber)
      +
      Configure the data sequence number for this rewrite operation. This data sequence number will + be used for all new data files that are added in this rewrite. This method is helpful to avoid + commit conflicts between data compaction and adding equality deletes.
      +
      +
      Parameters:
      +
      sequenceNumber - a data sequence number
      +
      Returns:
      +
      this for method chaining
      +
      +
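Putting the non-deprecated calls above together, a hedged sketch of a single-file compaction commit (the table handle and the rewritten and replacement files are assumed to exist already):

    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.Table;

    class RewriteCommitSketch {
      static void compact(Table table, DataFile oldFile, DataFile newFile) {
        long snapshotId = table.currentSnapshot().snapshotId();
        table.newRewrite()
            .validateFromSnapshot(snapshotId) // snapshot ID used in any reads for this operation
            .deleteFile(oldFile)              // remove the rewritten data file
            .addFile(newFile)                 // add its compacted replacement
            .commit();
      }
    }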
    • +
    • rewriteFiles

      -
      default RewriteFiles rewriteFiles(java.util.Set<DataFile> filesToDelete,
      -                                  java.util.Set<DataFile> filesToAdd)
      +
      @Deprecated
      +default RewriteFiles rewriteFiles(java.util.Set<DataFile> filesToDelete,
      +                                              java.util.Set<DataFile> filesToAdd)
      +
      Deprecated. since 1.3.0, will be removed in 2.0.0
      Add a rewrite that replaces one set of data files with another set that contains the same data.
      Parameters:
      @@ -215,9 +387,11 @@

      rewriteFiles

      • rewriteFiles

        -
        RewriteFiles rewriteFiles(java.util.Set<DataFile> filesToDelete,
        -                          java.util.Set<DataFile> filesToAdd,
        -                          long sequenceNumber)
        +
        @Deprecated
        +RewriteFiles rewriteFiles(java.util.Set<DataFile> filesToDelete,
        +                                      java.util.Set<DataFile> filesToAdd,
        +                                      long sequenceNumber)
        +
        Deprecated. since 1.3.0, will be removed in 2.0.0
        Add a rewrite that replaces one set of data files with another set that contains the same data. The sequence number provided will be used for all the data files added.
        @@ -236,10 +410,12 @@

        rewriteFiles

        • rewriteFiles

          -
          RewriteFiles rewriteFiles(java.util.Set<DataFile> dataFilesToReplace,
          -                          java.util.Set<DeleteFile> deleteFilesToReplace,
          -                          java.util.Set<DataFile> dataFilesToAdd,
          -                          java.util.Set<DeleteFile> deleteFilesToAdd)
          +
          @Deprecated
          +RewriteFiles rewriteFiles(java.util.Set<DataFile> dataFilesToReplace,
          +                                      java.util.Set<DeleteFile> deleteFilesToReplace,
          +                                      java.util.Set<DataFile> dataFilesToAdd,
          +                                      java.util.Set<DeleteFile> deleteFilesToAdd)
          +
          Deprecated. since 1.3.0, will be removed in 2.0.0
          Add a rewrite that replaces one set of files with another set that contains the same data.
          Parameters:
          diff --git a/javadoc/org/apache/iceberg/RewriteManifests.html b/javadoc/org/apache/iceberg/RewriteManifests.html index 419b685b6..bf9aac803 100644 --- a/javadoc/org/apache/iceberg/RewriteManifests.html +++ b/javadoc/org/apache/iceberg/RewriteManifests.html @@ -48,7 +48,7 @@

    default ThisTmetricsReporter(MetricsReporter reporter) +
    Create a new scan that will report scan metrics to the provided reporter in addition to + reporters maintained by the scan.
    +
    ThisT option(java.lang.String property, java.lang.String value) @@ -179,61 +186,61 @@

    Method Summary

    behavior based on the incoming pair.
    CloseableIterable<T> planFiles()
    Plan tasks for this scan where each task reads a single file.
    CloseableIterable<G> planTasks()
    Plan balanced task groups for this scan by splitting large and combining small tasks.
    ThisT planWith(java.util.concurrent.ExecutorService executorService)
    Create a new scan to use a particular executor to plan.
    ThisT project(Schema schema)
    Create a new scan from this with the schema as its projection.
    Schema schema()
    Returns this scan's projection Schema.
    ThisT select(java.util.Collection<java.lang.String> columns)
    Create a new scan from this that will read the given data columns.
    default ThisT select(java.lang.String... columns)
    Create a new scan from this that will read the given columns.
    int splitLookback()
    Returns the split lookback for this scan.
    long splitOpenFileCost()
    Returns the split open file cost for this scan.
    long targetSplitSize()
    Returns the target split size for this scan.
    @@ -509,13 +516,24 @@

    splitLookback

    -
      +
      • splitOpenFileCost

        long splitOpenFileCost()
        Returns the split open file cost for this scan.
      + + + +
        +
      • +

        metricsReporter

        +
        default ThisT metricsReporter(MetricsReporter reporter)
        +
        Create a new scan that will report scan metrics to the provided reporter in addition to + reporters maintained by the scan.
        +
      • +
    diff --git a/javadoc/org/apache/iceberg/Snapshot.html b/javadoc/org/apache/iceberg/Snapshot.html index 106586003..62c9108ff 100644 --- a/javadoc/org/apache/iceberg/Snapshot.html +++ b/javadoc/org/apache/iceberg/Snapshot.html @@ -383,7 +383,8 @@

    addedDataFiles

    Return all data files added to the table in this snapshot.

    The files returned include the following columns: file_path, file_format, partition, - record_count, and file_size_in_bytes. Other columns will be null.

    + record_count, and file_size_in_bytes. Data and file sequence number are populated. Other + columns will be null.
    Parameters:
    io - a FileIO instance used for reading files from storage
    @@ -402,7 +403,8 @@

    removedDataFiles

    Return all data files removed from the table in this snapshot.

    The files returned include the following columns: file_path, file_format, partition, - record_count, and file_size_in_bytes. Other columns will be null.

    + record_count, and file_size_in_bytes. Data and file sequence number are populated. Other + columns will be null.
    Parameters:
    io - a FileIO instance used for reading files from storage
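For context, a short sketch that walks the file listing described above off the current snapshot (a loaded Table is assumed; only the documented columns are relied on):

    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.Snapshot;
    import org.apache.iceberg.Table;

    class SnapshotFilesSketch {
      static void printAddedFiles(Table table) {
        Snapshot current = table.currentSnapshot();
        for (DataFile file : current.addedDataFiles(table.io())) {
          // path, record count, and the sequence numbers are populated; other columns may be null
          System.out.println(file.path()
              + " records=" + file.recordCount()
              + " dataSeq=" + file.dataSequenceNumber());
        }
      }
    }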
    diff --git a/javadoc/org/apache/iceberg/SnapshotManager.html b/javadoc/org/apache/iceberg/SnapshotManager.html index a6b645202..3d317f768 100644 --- a/javadoc/org/apache/iceberg/SnapshotManager.html +++ b/javadoc/org/apache/iceberg/SnapshotManager.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -153,19 +153,25 @@

    Method Summary

    ManageSnapshotscreateBranch(java.lang.String name) +
    Create a new branch.
    +
    ManageSnapshots createBranch(java.lang.String name, long snapshotId)
    Create a new branch pointing to the given snapshot id.
    ManageSnapshots createTag(java.lang.String name, long snapshotId)
    Create a new tag pointing to the given snapshot id
    ManageSnapshots fastForwardBranch(java.lang.String name, java.lang.String source) @@ -173,79 +179,79 @@

    Method Summary

    ancestor of source.
    ManageSnapshots removeBranch(java.lang.String name)
    Remove a branch by name
    ManageSnapshots removeTag(java.lang.String name)
    Remove the tag with the given name.
    ManageSnapshots renameBranch(java.lang.String name, java.lang.String newName)
    Rename a branch
    ManageSnapshots replaceBranch(java.lang.String name, long snapshotId)
    Replaces the branch with the given name to point to the specified snapshot
    ManageSnapshots replaceBranch(java.lang.String name, java.lang.String source)
    Replaces the branch with the given name to point to the source snapshot.
    ManageSnapshots replaceTag(java.lang.String name, long snapshotId)
    Replaces the tag with the given name to point to the specified snapshot.
    ManageSnapshots rollbackTo(long snapshotId)
    Rollback table's state to a specific Snapshot identified by id.
    ManageSnapshots rollbackToTime(long timestampMillis)
    Roll this table's data back to the last Snapshot before the given timestamp.
    ManageSnapshots setCurrentSnapshot(long snapshotId)
    Roll this table's data back to a specific Snapshot identified by id.
    ManageSnapshots setMaxRefAgeMs(java.lang.String name, long maxRefAgeMs)
    Updates the retention policy for a reference.
    ManageSnapshots setMaxSnapshotAgeMs(java.lang.String name, long maxSnapshotAgeMs)
    Updates the max snapshot age for a branch.
    ManageSnapshots setMinSnapshotsToKeep(java.lang.String name, int minSnapshotsToKeep) @@ -357,6 +363,26 @@

    rollbackTo

    + + + +
      +
    • +

      createBranch

      +
      public ManageSnapshots createBranch(java.lang.String name)
      +
      Description copied from interface: ManageSnapshots
      +
+Create a new branch. The branch will point to the current snapshot if the current snapshot is not + NULL. Otherwise, the branch will point to a newly created empty snapshot.
      +
      +
      Specified by:
      +
      createBranch in interface ManageSnapshots
      +
      Parameters:
      +
      name - branch name
      +
      Returns:
      +
      this for method chaining
      +
      +
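A short illustrative chain of the branch-management calls summarized above (the branch name and retention value are placeholders):

    import java.util.concurrent.TimeUnit;
    import org.apache.iceberg.Table;

    class BranchSketch {
      static void createAuditBranch(Table table) {
        table.manageSnapshots()
            .createBranch("audit")                                    // current snapshot, or a new empty one
            .setMaxSnapshotAgeMs("audit", TimeUnit.DAYS.toMillis(7))  // keep a week of snapshots on the branch
            .commit();
      }
    }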
    • +
    diff --git a/javadoc/org/apache/iceberg/SnapshotScan.html b/javadoc/org/apache/iceberg/SnapshotScan.html index ad88a3c8d..c53beafa6 100644 --- a/javadoc/org/apache/iceberg/SnapshotScan.html +++ b/javadoc/org/apache/iceberg/SnapshotScan.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":6,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":42,"i10":6,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":42,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":6,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":6,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -117,7 +117,7 @@

    Class SnapshotScan<ThisT,T exten
    Direct Known Subclasses:
    -
    AllDataFilesTable.AllDataFilesTableScan, AllDeleteFilesTable.AllDeleteFilesTableScan, AllFilesTable.AllFilesTableScan, AllManifestsTable.AllManifestsTableScan, DataFilesTable.DataFilesTableScan, DataTableScan, DeleteFilesTable.DeleteFilesTableScan, FilesTable.FilesTableScan, PositionDeletesTable.PositionDeletesBatchScan
    +
    AllDataFilesTable.AllDataFilesTableScan, AllDeleteFilesTable.AllDeleteFilesTableScan, AllFilesTable.AllFilesTableScan, AllManifestsTable.AllManifestsTableScan, DataFilesTable.DataFilesTableScan, DataTableScan, DeleteFilesTable.DeleteFilesTableScan, FilesTable.FilesTableScan, PositionDeletesTable.PositionDeletesBatchScan, SparkDistributedDataScan


    @@ -151,6 +151,18 @@

    Field Summary

    protected static java.util.List<java.lang.String> DELETE_SCAN_WITH_STATS_COLUMNS 
    protected static booleanPLAN_SCANS_WITH_WORKER_POOL 
    protected static java.util.List<java.lang.String>SCAN_COLUMNS 
    protected static java.util.List<java.lang.String>SCAN_WITH_STATS_COLUMNS 
    @@ -182,7 +194,7 @@

    Constructor Summary

    Method Summary

    - + @@ -232,30 +244,29 @@

    Method Summary

    + + + + - - - + + - + - + - + - + - + - + - + - + + + + + - + - + - + - + - + - + + + + + - + - + - + - + - - - - - + - + - + - + - + + + + +
    All Methods Instance Methods Abstract Methods Concrete Methods Deprecated Methods All Methods Instance Methods Abstract Methods Concrete Methods 
    Modifier and Type Method and Description
    protected FileIOio() 
    boolean isCaseSensitive()
    Returns whether this scan is case-sensitive with respect to column names.
    protected ThisTnewRefinedScan(TableOperations ignored, - Table newTable, - Schema newSchema, - org.apache.iceberg.TableScanContext newContext) -
    Deprecated.  -
    will be removed in 1.3.0; use newRefinedScan(Table, Schema, TableScanContext) - instead.
    -
    +
    ThisTmetricsReporter(MetricsReporter reporter) +
    Create a new scan that will report scan metrics to the provided reporter in addition to + reporters maintained by the scan.
    protected abstract ThisT newRefinedScan(Table newTable, Schema newSchema, org.apache.iceberg.TableScanContext newContext) 
    ThisT option(java.lang.String property, java.lang.String value) @@ -263,114 +274,118 @@

    Method Summary

    behavior based on the incoming pair.
    protected java.util.Map<java.lang.String,java.lang.String> options() 
    protected java.util.concurrent.ExecutorService planExecutor() 
    CloseableIterable<T> planFiles()
    Plan tasks for this scan where each task reads a single file.
    ThisT planWith(java.util.concurrent.ExecutorService executorService)
    Create a new scan to use a particular executor to plan.
    ThisT project(Schema projectedSchema)
    Create a new scan from this with the schema as its projection.
    protected ExpressionresidualFilter() 
    protected java.util.List<java.lang.String> scanColumns() 
    protected ScanMetrics scanMetrics() 
    Schema schema()
    Returns this scan's projection Schema.
    ThisT select(java.util.Collection<java.lang.String> columns)
    Create a new scan from this that will read the given data columns.
    protected boolean shouldIgnoreResiduals() 
    protected boolean shouldPlanWithExecutor() 
    protected booleanshouldReturnColumnStats() 
    Snapshot snapshot() 
    protected java.lang.Long snapshotId() 
    int splitLookback()
    Returns the split lookback for this scan.
    long splitOpenFileCost()
    Returns the split open file cost for this scan.
    Table table() 
    protected TableOperationstableOps() -
    Deprecated.  -
    will be removed in 1.3.0; avoid using TableOperations for scans or use BaseTable
    -
    -
    protected Schema tableSchema() 
    long targetSplitSize()
    Returns the target split size for this scan.
    java.lang.String toString() 
    ThisT useRef(java.lang.String name) 
    ThisT useSnapshot(long scanSnapshotId) 
    protected booleanuseSnapshotSchema() 
    + + + + @@ -539,24 +590,22 @@

    toString

    - + - + @@ -595,6 +644,15 @@

    scanColumns

    protected java.util.List<java.lang.String> scanColumns()
    +
    + + + @@ -604,37 +662,31 @@

    shouldIgnoreResiduals

    protected boolean shouldIgnoreResiduals()
    - + - + - + @@ -895,7 +947,7 @@

    splitLookback

    - diff --git a/javadoc/org/apache/iceberg/SortOrder.Builder.html b/javadoc/org/apache/iceberg/SortOrder.Builder.html index a5b954267..3603612bd 100644 --- a/javadoc/org/apache/iceberg/SortOrder.Builder.html +++ b/javadoc/org/apache/iceberg/SortOrder.Builder.html @@ -152,7 +152,9 @@

    Method Summary

    SortOrder.Builder -caseSensitive(boolean sortCaseSensitive)  +caseSensitive(boolean sortCaseSensitive) +
    Set case sensitivity of sort column name resolution.
    + SortOrder.Builder @@ -284,6 +286,16 @@

    withOrderId

  • caseSensitive

    public SortOrder.Builder caseSensitive(boolean sortCaseSensitive)
    +
    Description copied from interface: SortOrderBuilder
    +
    Set case sensitivity of sort column name resolution.
    +
    +
    Specified by:
    +
    caseSensitive in interface SortOrderBuilder<SortOrder.Builder>
    +
    Parameters:
    +
    sortCaseSensitive - when true, column name resolution is case-sensitive
    +
    Returns:
    +
    this for method chaining
    +
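To make the flag concrete, a minimal sketch (the schema and the deliberately upper-cased column reference are illustrative):

    import org.apache.iceberg.Schema;
    import org.apache.iceberg.SortOrder;
    import org.apache.iceberg.types.Types;

    class SortOrderSketch {
      static SortOrder build() {
        Schema schema = new Schema(
            Types.NestedField.required(1, "event_ts", Types.TimestampType.withZone()));
        // With case sensitivity disabled, "EVENT_TS" still resolves to the "event_ts" column.
        return SortOrder.builderFor(schema)
            .caseSensitive(false)
            .asc("EVENT_TS")
            .build();
      }
    }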
  • diff --git a/javadoc/org/apache/iceberg/SortOrderBuilder.html b/javadoc/org/apache/iceberg/SortOrderBuilder.html index 6cb25788c..a59f3e7fb 100644 --- a/javadoc/org/apache/iceberg/SortOrderBuilder.html +++ b/javadoc/org/apache/iceberg/SortOrderBuilder.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":18,"i1":18,"i2":18,"i3":6,"i4":18,"i5":18,"i6":18,"i7":6}; +var methods = {"i0":18,"i1":18,"i2":18,"i3":6,"i4":18,"i5":18,"i6":18,"i7":18,"i8":6}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -155,24 +155,30 @@

    Method Summary

    default
    R +caseSensitive(boolean caseSensitive) +
    Set case sensitivity of sort column name resolution.
    + + + +default R desc(java.lang.String name)
    Add a field to the sort by field name, ascending with nulls first.
    - + default R desc(java.lang.String name, NullOrder nullOrder)
    Add a field to the sort by field name, ascending with the given null order.
    - + default R desc(Term term)
    Add an expression term to the sort, ascending with nulls first.
    - + R desc(Term term, NullOrder nullOrder) @@ -315,7 +321,7 @@

    desc

    - diff --git a/javadoc/org/apache/iceberg/SortOrderParser.html b/javadoc/org/apache/iceberg/SortOrderParser.html index c4aab3a48..1b9d25275 100644 --- a/javadoc/org/apache/iceberg/SortOrderParser.html +++ b/javadoc/org/apache/iceberg/SortOrderParser.html @@ -48,7 +48,7 @@ +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          propertyKey

          +
          public final java.lang.String propertyKey()
          +
        • +
        + + + +
          +
        • +

          envKey

          +
          public final java.lang.String envKey()
          +
        • +
        + + + +
          +
        • +

          defaultValue

          +
          public final T defaultValue()
          +
        • +
        + + + +
          +
        • +

          value

          +
          public final T value()
          +
        • +
        +
      • +
      +
    • +
    +
    + + + + + + + + diff --git a/javadoc/org/apache/iceberg/SystemConfigs.html b/javadoc/org/apache/iceberg/SystemConfigs.html new file mode 100644 index 000000000..bb8ecf26c --- /dev/null +++ b/javadoc/org/apache/iceberg/SystemConfigs.html @@ -0,0 +1,295 @@ + + + + + +SystemConfigs + + + + + + + + + + + +
    +
    org.apache.iceberg
    +

    Class SystemConfigs

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • org.apache.iceberg.SystemConfigs
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class SystemConfigs
      +extends java.lang.Object
      +
+Configuration properties that are controlled by Java system properties or environment variables.
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          WORKER_THREAD_POOL_SIZE

          +
          public static final SystemConfigs.ConfigEntry<java.lang.Integer> WORKER_THREAD_POOL_SIZE
          +
          Sets the size of the worker pool. The worker pool limits the number of tasks concurrently + processing manifests in the base table implementation across all concurrent planning or commit + operations.
          +
        • +
        + + + +
          +
        • +

          SCAN_THREAD_POOL_ENABLED

          +
          public static final SystemConfigs.ConfigEntry<java.lang.Boolean> SCAN_THREAD_POOL_ENABLED
          +
          Whether to use the shared worker pool when planning table scans.
          +
        • +
        + + + +
          +
        • +

          IO_MANIFEST_CACHE_MAX_FILEIO

          +
          public static final SystemConfigs.ConfigEntry<java.lang.Integer> IO_MANIFEST_CACHE_MAX_FILEIO
          +
+Maximum number of distinct FileIO instances that are allowed to have + an associated ContentCache in memory at a time.
          +
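As an informal illustration of reading one of these entries (the accessors follow the ConfigEntry methods listed earlier; the exact resolution order between property, environment variable, and default is an assumption, not spelled out in this Javadoc):

    import org.apache.iceberg.SystemConfigs;

    class SystemConfigsSketch {
      static void printWorkerPoolConfig() {
        // value() is expected to resolve the system property or environment variable,
        // falling back to the compiled-in default (order assumed).
        int poolSize = SystemConfigs.WORKER_THREAD_POOL_SIZE.value();
        System.out.println("property key: " + SystemConfigs.WORKER_THREAD_POOL_SIZE.propertyKey());
        System.out.println("env key:      " + SystemConfigs.WORKER_THREAD_POOL_SIZE.envKey());
        System.out.println("effective worker pool size: " + poolSize);
      }
    }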
        • +
        +
      • +
      +
    • +
    +
    +
    + + + + + + + diff --git a/javadoc/org/apache/iceberg/SystemProperties.html b/javadoc/org/apache/iceberg/SystemProperties.html index e7a803d74..10159ae01 100644 --- a/javadoc/org/apache/iceberg/SystemProperties.html +++ b/javadoc/org/apache/iceberg/SystemProperties.html @@ -41,7 +41,7 @@ diff --git a/javadoc/org/apache/iceberg/arrow/package-summary.html b/javadoc/org/apache/iceberg/arrow/package-summary.html index b119dc34a..1176cf8c6 100644 --- a/javadoc/org/apache/iceberg/arrow/package-summary.html +++ b/javadoc/org/apache/iceberg/arrow/package-summary.html @@ -88,6 +88,12 @@

    Package org.apache.iceberg.arrow

    ArrowSchemaUtil   + +DictEncodedArrowConverter + +
    This converts dictionary encoded arrow vectors to a correctly typed arrow vector.
    + + diff --git a/javadoc/org/apache/iceberg/arrow/package-tree.html b/javadoc/org/apache/iceberg/arrow/package-tree.html index f0eca177f..5be0feacf 100644 --- a/javadoc/org/apache/iceberg/arrow/package-tree.html +++ b/javadoc/org/apache/iceberg/arrow/package-tree.html @@ -81,6 +81,7 @@

    Class Hierarchy

    diff --git a/javadoc/org/apache/iceberg/arrow/vectorized/ColumnVector.html b/javadoc/org/apache/iceberg/arrow/vectorized/ColumnVector.html index bcda54357..fbfce3b38 100644 --- a/javadoc/org/apache/iceberg/arrow/vectorized/ColumnVector.html +++ b/javadoc/org/apache/iceberg/arrow/vectorized/ColumnVector.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -132,6 +132,7 @@

    Class ColumnVector

  • Types.DateType
  • Types.TimeType
  • Types.UUIDType +
  • Types.DecimalType
  • @@ -156,46 +157,60 @@

    Method Summary

    close()  +org.apache.arrow.vector.FieldVector +getArrowVector() +
    Decodes a dict-encoded vector and returns the actual arrow vector.
    + + + byte[] getBinary(int rowId)  - + boolean getBoolean(int rowId)  - + +java.math.BigDecimal +getDecimal(int rowId, + int precision, + int scale)  + + double getDouble(int rowId)  - + org.apache.arrow.vector.FieldVector -getFieldVector()  +getFieldVector() +
    Returns the potentially dict-encoded FieldVector.
    + - + float getFloat(int rowId)  - + int getInt(int rowId)  - + long getLong(int rowId)  - + java.lang.String getString(int rowId)  - + boolean hasNull()  - + boolean isNullAt(int rowId)  - + int numNulls()  @@ -228,6 +243,25 @@

    Method Detail

  • getFieldVector

    public org.apache.arrow.vector.FieldVector getFieldVector()
    +
    Returns the potentially dict-encoded FieldVector.
    +
    +
    Returns:
    +
    instance of FieldVector
    +
    +
  • + + + + + @@ -327,12 +361,23 @@
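As a usage sketch for the accessors added above (getDecimal, getArrowVector) alongside the existing null checks; the precision/scale of 38/2 and the row count are illustrative assumptions:

    import java.math.BigDecimal;
    import org.apache.arrow.vector.FieldVector;
    import org.apache.iceberg.arrow.vectorized.ColumnVector;

    public class DecimalColumnSum {
      // Sums a decimal(38, 2) column; `column` and `numRows` are assumed to come
      // from a vectorized batch read elsewhere.
      static BigDecimal sum(ColumnVector column, int numRows) {
        BigDecimal total = BigDecimal.ZERO;
        for (int row = 0; row < numRows; row++) {
          if (!column.isNullAt(row)) {
            total = total.add(column.getDecimal(row, 38, 2));
          }
        }
        // getArrowVector() returns the dictionary-decoded vector, whereas
        // getFieldVector() may still be dictionary-encoded.
        FieldVector decoded = column.getArrowVector();
        System.out.println("decoded value count: " + decoded.getValueCount());
        return total;
      }
    }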

    getString

    - diff --git a/javadoc/org/apache/iceberg/arrow/vectorized/GenericArrowVectorAccessorFactory.StringFactory.html b/javadoc/org/apache/iceberg/arrow/vectorized/GenericArrowVectorAccessorFactory.StringFactory.html index 151314715..9c13f4b90 100644 --- a/javadoc/org/apache/iceberg/arrow/vectorized/GenericArrowVectorAccessorFactory.StringFactory.html +++ b/javadoc/org/apache/iceberg/arrow/vectorized/GenericArrowVectorAccessorFactory.StringFactory.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":6,"i3":6}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"]}; +var methods = {"i0":6,"i1":6,"i2":6,"i3":18,"i4":6}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -122,7 +122,7 @@

    -All Methods Instance Methods Abstract Methods  +All Methods Instance Methods Abstract Methods Default Methods  Modifier and Type Method and Description @@ -146,6 +146,13 @@

    Method Summary

    +default Utf8StringT +ofRow(org.apache.arrow.vector.FixedSizeBinaryVector vector, + int rowId) +
    Create a UTF8 String from the row value in the FixedSizeBinaryVector vector.
    + + + Utf8StringT ofRow(org.apache.arrow.vector.VarCharVector vector, int rowId) @@ -188,6 +195,17 @@

    ofRow

    Create a UTF8 String from the row value in the arrow vector.
    + + + + diff --git a/javadoc/org/apache/iceberg/arrow/vectorized/VectorHolder.ConstantVectorHolder.html b/javadoc/org/apache/iceberg/arrow/vectorized/VectorHolder.ConstantVectorHolder.html index 740afa7b8..2ab5b124b 100644 --- a/javadoc/org/apache/iceberg/arrow/vectorized/VectorHolder.ConstantVectorHolder.html +++ b/javadoc/org/apache/iceberg/arrow/vectorized/VectorHolder.ConstantVectorHolder.html @@ -157,6 +157,15 @@

    Constructor Summary

    ConstantVectorHolder(int numRows, + T constantValue) +
    Deprecated.  +
    since 1.4.0, will be removed in 1.5.0; use typed constant holders instead.
    +
    + + + +ConstantVectorHolder(Types.NestedField icebergField, + int numRows, T constantValue)  @@ -188,7 +197,7 @@
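A small sketch of moving off the deprecated constructor above to the new field-aware one; the NestedField definition and row count are illustrative only:

    import org.apache.iceberg.arrow.vectorized.VectorHolder;
    import org.apache.iceberg.types.Types;

    public class ConstantHolderExample {
      public static void main(String[] args) {
        // Hypothetical field describing the constant column.
        Types.NestedField field =
            Types.NestedField.optional(100, "category", Types.StringType.get());

        // Deprecated since 1.4.0: new VectorHolder.ConstantVectorHolder<>(1024, "books")
        // Preferred: pass the Iceberg field alongside the row count and constant value.
        VectorHolder.ConstantVectorHolder<String> holder =
            new VectorHolder.ConstantVectorHolder<>(field, 1024, "books");
        System.out.println(holder.isDummy());
      }
    }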

    Method Summary

    Methods inherited from class org.apache.iceberg.arrow.vectorized.VectorHolder

    -constantHolder, deletedVectorHolder, descriptor, dictionary, dummyHolder, icebergType, isDictionaryEncoded, isDummy, nullabilityHolder, vector +constantHolder, constantHolder, deletedVectorHolder, descriptor, dictionary, dummyHolder, icebergField, icebergType, isDictionaryEncoded, isDummy, nullabilityHolder, vector
    • diff --git a/javadoc/org/apache/iceberg/arrow/vectorized/VectorizedArrowReader.html b/javadoc/org/apache/iceberg/arrow/vectorized/VectorizedArrowReader.html index a0ea2bd09..c1493779c 100644 --- a/javadoc/org/apache/iceberg/arrow/vectorized/VectorizedArrowReader.html +++ b/javadoc/org/apache/iceberg/arrow/vectorized/VectorizedArrowReader.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":9,"i2":9,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10}; +var methods = {"i0":10,"i1":10,"i2":9,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -213,29 +213,33 @@

      Method Summary

      +protected
      Types.NestedField +icebergField()  + + static VectorizedArrowReader nulls()  - + static VectorizedArrowReader positions()  - + static VectorizedArrowReader positionsWithSetArrowValidityVector()  - + VectorHolder read(VectorHolder reuse, int numValsToRead)
      Reads a batch of type <T> and of size numRows
      - + void setBatchSize(int batchSize)  - + void setRowGroupInfo(org.apache.parquet.column.page.PageReadStore source, java.util.Map<org.apache.parquet.hadoop.metadata.ColumnPath,org.apache.parquet.hadoop.metadata.ColumnChunkMetaData> metadata, @@ -243,7 +247,7 @@

      Method Summary

      Sets the row group information to be used with this reader
      - + java.lang.String toString()  @@ -310,6 +314,15 @@
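For reference, a hedged sketch of the static factories and the read call summarized above; real usage would first call setRowGroupInfo and setBatchSize against a Parquet row group, which is omitted here:

    import org.apache.iceberg.arrow.vectorized.VectorHolder;
    import org.apache.iceberg.arrow.vectorized.VectorizedArrowReader;

    public class PlaceholderReaders {
      public static void main(String[] args) {
        // Placeholder readers for missing columns and for row positions.
        VectorizedArrowReader nullsReader = VectorizedArrowReader.nulls();
        VectorizedArrowReader positionsReader = VectorizedArrowReader.positions();

        // read(reuse, numValsToRead) returns a VectorHolder batch; passing null
        // asks the reader to allocate a new holder (an assumption for this sketch).
        VectorHolder batch = nullsReader.read(null, 4096);
        System.out.println(batch.isDummy());
        System.out.println(positionsReader);
      }
    }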

      VectorizedArrowReader

      Method Detail

      + + + + diff --git a/javadoc/org/apache/iceberg/arrow/vectorized/parquet/VectorizedColumnIterator.BatchReader.html b/javadoc/org/apache/iceberg/arrow/vectorized/parquet/VectorizedColumnIterator.BatchReader.html index 399fb225c..e736b5788 100644 --- a/javadoc/org/apache/iceberg/arrow/vectorized/parquet/VectorizedColumnIterator.BatchReader.html +++ b/javadoc/org/apache/iceberg/arrow/vectorized/parquet/VectorizedColumnIterator.BatchReader.html @@ -107,7 +107,7 @@

      Class Vecto
    • Direct Known Subclasses:
      -
      VectorizedColumnIterator.BooleanBatchReader, VectorizedColumnIterator.DictionaryBatchReader, VectorizedColumnIterator.DoubleBatchReader, VectorizedColumnIterator.FixedLengthDecimalBatchReader, VectorizedColumnIterator.FixedSizeBinaryBatchReader, VectorizedColumnIterator.FixedWidthTypeBinaryBatchReader, VectorizedColumnIterator.FloatBatchReader, VectorizedColumnIterator.IntBackedDecimalBatchReader, VectorizedColumnIterator.IntegerBatchReader, VectorizedColumnIterator.LongBackedDecimalBatchReader, VectorizedColumnIterator.LongBatchReader, VectorizedColumnIterator.TimestampMillisBatchReader, VectorizedColumnIterator.VarWidthTypeBatchReader
      +
      VectorizedColumnIterator.BooleanBatchReader, VectorizedColumnIterator.DictionaryBatchReader, VectorizedColumnIterator.DoubleBatchReader, VectorizedColumnIterator.FixedSizeBinaryBatchReader, VectorizedColumnIterator.FixedWidthTypeBinaryBatchReader, VectorizedColumnIterator.FloatBatchReader, VectorizedColumnIterator.IntegerBatchReader, VectorizedColumnIterator.LongBatchReader, VectorizedColumnIterator.TimestampInt96BatchReader, VectorizedColumnIterator.TimestampMillisBatchReader, VectorizedColumnIterator.VarWidthTypeBatchReader
      Enclosing class:
      diff --git a/javadoc/org/apache/iceberg/arrow/vectorized/parquet/VectorizedColumnIterator.DoubleBatchReader.html b/javadoc/org/apache/iceberg/arrow/vectorized/parquet/VectorizedColumnIterator.DoubleBatchReader.html index 12a022968..e66d20132 100644 --- a/javadoc/org/apache/iceberg/arrow/vectorized/parquet/VectorizedColumnIterator.DoubleBatchReader.html +++ b/javadoc/org/apache/iceberg/arrow/vectorized/parquet/VectorizedColumnIterator.DoubleBatchReader.html @@ -48,7 +48,7 @@
      All Known Implementing Classes:
      -
      DataWriter, FlinkAvroWriter, SparkAvroWriter
      +
      DataWriter, FlinkAvroWriter, GenericAvroWriter, SparkAvroWriter


      diff --git a/javadoc/org/apache/iceberg/avro/SupportsRowPosition.html b/javadoc/org/apache/iceberg/avro/SupportsRowPosition.html index 24368108b..daa284b64 100644 --- a/javadoc/org/apache/iceberg/avro/SupportsRowPosition.html +++ b/javadoc/org/apache/iceberg/avro/SupportsRowPosition.html @@ -99,7 +99,7 @@

      Interface SupportsRowPos
    • All Known Implementing Classes:
      -
      DataReader, FlinkAvroReader, ProjectionDatumReader, SparkAvroReader, ValueReaders.StructReader
      +
      DataReader, FlinkAvroReader, GenericAvroReader, ProjectionDatumReader, SparkAvroReader, ValueReaders.StructReader


      diff --git a/javadoc/org/apache/iceberg/avro/package-frame.html b/javadoc/org/apache/iceberg/avro/package-frame.html index 5dea24a50..dbc73a00b 100644 --- a/javadoc/org/apache/iceberg/avro/package-frame.html +++ b/javadoc/org/apache/iceberg/avro/package-frame.html @@ -31,6 +31,8 @@

      Classes

    • AvroSchemaVisitor
    • AvroSchemaWithTypeVisitor
    • AvroWithPartnerByStructureVisitor
    • +
    • GenericAvroReader
    • +
    • GenericAvroWriter
    • LogicalMap
    • ProjectionDatumReader
    • RemoveIds
    • diff --git a/javadoc/org/apache/iceberg/avro/package-summary.html b/javadoc/org/apache/iceberg/avro/package-summary.html index ac3b168b3..a6b2b04cb 100644 --- a/javadoc/org/apache/iceberg/avro/package-summary.html +++ b/javadoc/org/apache/iceberg/avro/package-summary.html @@ -163,6 +163,14 @@

      Package org.apache.iceberg.avro

      +GenericAvroReader<T> +  + + +GenericAvroWriter<T> +  + + LogicalMap   diff --git a/javadoc/org/apache/iceberg/avro/package-tree.html b/javadoc/org/apache/iceberg/avro/package-tree.html index 1151e9758..d2fb3e6fa 100644 --- a/javadoc/org/apache/iceberg/avro/package-tree.html +++ b/javadoc/org/apache/iceberg/avro/package-tree.html @@ -104,6 +104,8 @@

      Class Hierarchy

    • org.apache.iceberg.avro.UUIDConversion
    +
  • org.apache.iceberg.avro.GenericAvroReader<T> (implements org.apache.avro.io.DatumReader<D>, org.apache.iceberg.avro.SupportsRowPosition)
  • +
  • org.apache.iceberg.avro.GenericAvroWriter<T> (implements org.apache.iceberg.avro.MetricsAwareDatumWriter<D>)
  • org.apache.avro.LogicalType
    • org.apache.iceberg.avro.LogicalMap
    • diff --git a/javadoc/org/apache/iceberg/aws/AssumeRoleAwsClientFactory.html b/javadoc/org/apache/iceberg/aws/AssumeRoleAwsClientFactory.html index 6ad2ed008..8559f3f53 100644 --- a/javadoc/org/apache/iceberg/aws/AssumeRoleAwsClientFactory.html +++ b/javadoc/org/apache/iceberg/aws/AssumeRoleAwsClientFactory.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -178,27 +178,35 @@

      Method Summary

      +protected HttpClientProperties +httpClientProperties()  + + void initialize(java.util.Map<java.lang.String,java.lang.String> properties)
      Initialize AWS client factory from catalog properties.
      - + software.amazon.awssdk.services.kms.KmsClient kms()
      Create an AWS KMS client
      - + protected java.lang.String region()  - + software.amazon.awssdk.services.s3.S3Client s3()
      Create an Amazon S3 client
      + +protected S3FileIOProperties +s3FileIOProperties()  + @@ -228,7 +228,7 @@
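A hedged sketch of a subclass leaning on the protected accessors listed above (region(), httpClientProperties(), s3FileIOProperties()); the logging is illustrative only:

    import org.apache.iceberg.aws.AssumeRoleAwsClientFactory;
    import software.amazon.awssdk.services.s3.S3Client;

    public class LoggingAssumeRoleFactory extends AssumeRoleAwsClientFactory {
      @Override
      public S3Client s3() {
        // region(), httpClientProperties() and s3FileIOProperties() are protected
        // accessors exposed to subclasses per the summary above.
        System.out.println("Building S3 client for region " + region());
        return super.s3();
      }
    }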

      configureEndpoint

      @Deprecated
       public static <T extends software.amazon.awssdk.core.client.builder.SdkClientBuilder> void configureEndpoint(T builder,
                                                                                                                                java.lang.String endpoint)
      -
      Deprecated. Not for public use. To configure the endpoint for a client, please use AwsProperties.applyS3EndpointConfigurations(S3ClientBuilder), AwsProperties.applyGlueEndpointConfigurations(GlueClientBuilder), or AwsProperties.applyDynamoDbEndpointConfigurations(DynamoDbClientBuilder) accordingly. It +
      Configure the endpoint setting for a client
      diff --git a/javadoc/org/apache/iceberg/aws/AwsClientFactory.html b/javadoc/org/apache/iceberg/aws/AwsClientFactory.html index ce22ecb8d..6e9c63c25 100644 --- a/javadoc/org/apache/iceberg/aws/AwsClientFactory.html +++ b/javadoc/org/apache/iceberg/aws/AwsClientFactory.html @@ -48,7 +48,7 @@ -
      -
        -
      • - -
          -
        • - - -

          Field Detail

          - - - -
            -
          • -

            S3FILEIO_SSE_TYPE

            -
            public static final java.lang.String S3FILEIO_SSE_TYPE
            -
            Type of S3 Server side encryption used, default to S3FILEIO_SSE_TYPE_NONE. - -

            For more details: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_SSE_TYPE_NONE

            -
            public static final java.lang.String S3FILEIO_SSE_TYPE_NONE
            -
            No server side encryption.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_SSE_TYPE_KMS

            -
            public static final java.lang.String S3FILEIO_SSE_TYPE_KMS
            -
            S3 SSE-KMS encryption. - -

            For more details: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_SSE_TYPE_S3

            -
            public static final java.lang.String S3FILEIO_SSE_TYPE_S3
            -
            S3 SSE-S3 encryption. - -

            For more details: - https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_SSE_TYPE_CUSTOM

            -
            public static final java.lang.String S3FILEIO_SSE_TYPE_CUSTOM
            -
            S3 SSE-C encryption. - -

            For more details: - https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_SSE_KEY

            -
            public static final java.lang.String S3FILEIO_SSE_KEY
            -
            If S3 encryption type is SSE-KMS, input is a KMS Key ID or ARN. In case this property is not - set, default key "aws/s3" is used. If encryption type is SSE-C, input is a custom base-64 - AES256 symmetric key.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_SSE_MD5

            -
            public static final java.lang.String S3FILEIO_SSE_MD5
            -
            If S3 encryption type is SSE-C, input is the base-64 MD5 digest of the secret key. This MD5 - must be explicitly passed in by the caller to ensure key integrity.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            GLUE_CATALOG_ID

            -
            public static final java.lang.String GLUE_CATALOG_ID
            -
            The ID of the Glue Data Catalog where the tables reside. If none is provided, Glue - automatically uses the caller's AWS account ID by default. - -

            For more details, see - https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-databases.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            GLUE_ACCOUNT_ID

            -
            public static final java.lang.String GLUE_ACCOUNT_ID
            -
            The account ID used in a Glue resource ARN, e.g. - arn:aws:glue:us-east-1:1000000000000:table/db1/table1
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            GLUE_CATALOG_SKIP_ARCHIVE

            -
            public static final java.lang.String GLUE_CATALOG_SKIP_ARCHIVE
            -
            If Glue should skip archiving an old table version when creating a new version in a commit. By - default Glue archives all old table versions after an UpdateTable call, but Glue has a default - max number of archived table versions (can be increased). So for streaming use case with lots - of commits, it is recommended to set this value to true.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            GLUE_CATALOG_SKIP_ARCHIVE_DEFAULT

            -
            public static final boolean GLUE_CATALOG_SKIP_ARCHIVE_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            GLUE_CATALOG_SKIP_NAME_VALIDATION

            -
            public static final java.lang.String GLUE_CATALOG_SKIP_NAME_VALIDATION
            -
            If Glue should skip name validations. It is recommended to stick to Glue best practices in - https://docs.aws.amazon.com/athena/latest/ug/glue-best-practices.html to make sure operations - are Hive compatible. This is only added for users that have existing conventions using - non-standard characters. When database name and table name validation are skipped, there is no - guarantee that downstream systems would all support the names.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            GLUE_CATALOG_SKIP_NAME_VALIDATION_DEFAULT

            -
            public static final boolean GLUE_CATALOG_SKIP_NAME_VALIDATION_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            GLUE_LAKEFORMATION_ENABLED

            -
            public static final java.lang.String GLUE_LAKEFORMATION_ENABLED
            -
            If set, GlueCatalog will use Lake Formation for access control. For more credential vending - details, see: https://docs.aws.amazon.com/lake-formation/latest/dg/api-overview.html. If - enabled, the AwsClientFactory implementation must be LakeFormationAwsClientFactory or any class that extends it.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            GLUE_LAKEFORMATION_ENABLED_DEFAULT

            -
            public static final boolean GLUE_LAKEFORMATION_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            GLUE_CATALOG_ENDPOINT

            -
            public static final java.lang.String GLUE_CATALOG_ENDPOINT
            -
            Configure an alternative endpoint of the Glue service for GlueCatalog to access. - -

            This could be used to use GlueCatalog with any glue-compatible metastore service that has a - different endpoint

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_MULTIPART_UPLOAD_THREADS

            -
            public static final java.lang.String S3FILEIO_MULTIPART_UPLOAD_THREADS
            -
            Number of threads to use for uploading parts to S3 (shared pool across all output streams), - default to Runtime.availableProcessors()
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_MULTIPART_SIZE

            -
            public static final java.lang.String S3FILEIO_MULTIPART_SIZE
            -
            The size of a single part for multipart upload requests in bytes (default: 32MB). Based on S3 - requirements, the part size must be at least 5MB. To ensure performance of the reader and - writer, the part size must be less than 2GB. - -

            For more details, see https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_MULTIPART_SIZE_DEFAULT

            -
            public static final int S3FILEIO_MULTIPART_SIZE_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_MULTIPART_SIZE_MIN

            -
            public static final int S3FILEIO_MULTIPART_SIZE_MIN
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_MULTIPART_THRESHOLD_FACTOR

            -
            public static final java.lang.String S3FILEIO_MULTIPART_THRESHOLD_FACTOR
            -
            The threshold expressed as a factor times the multipart size at which to switch from uploading - using a single put object request to uploading using multipart upload (default: 1.5).
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_MULTIPART_THRESHOLD_FACTOR_DEFAULT

            -
            public static final double S3FILEIO_MULTIPART_THRESHOLD_FACTOR_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_STAGING_DIRECTORY

            -
            public static final java.lang.String S3FILEIO_STAGING_DIRECTORY
            -
            Location to put staging files for upload to S3, default to temp directory set in - java.io.tmpdir.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_ACL

            -
            public static final java.lang.String S3FILEIO_ACL
            -
            Used to configure canned access control list (ACL) for S3 client to use during write. If not - set, ACL will not be set for requests. - -

            The input must be one of ObjectCannedACL, - such as 'public-read-write' For more details: - https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_ENDPOINT

            -
            public static final java.lang.String S3FILEIO_ENDPOINT
            -
            Configure an alternative endpoint of the S3 service for S3FileIO to access. - -

            This could be used to use S3FileIO with any s3-compatible object storage service that has a - different endpoint, or access a private S3 endpoint in a virtual private cloud.

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_PATH_STYLE_ACCESS

            -
            public static final java.lang.String S3FILEIO_PATH_STYLE_ACCESS
            -
            If set true, requests to S3FileIO will use Path-Style, otherwise, Virtual Hosted-Style - will be used. - -

            For more details: https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_PATH_STYLE_ACCESS_DEFAULT

            -
            public static final boolean S3FILEIO_PATH_STYLE_ACCESS_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_ACCESS_KEY_ID

            -
            public static final java.lang.String S3FILEIO_ACCESS_KEY_ID
            -
            Configure the static access key ID used to access S3FileIO. - -

            When set, the default client factory will use the basic or session credentials provided - instead of reading the default credential chain to create S3 access credentials. If S3FILEIO_SESSION_TOKEN is set, session credential is used, otherwise basic credential is - used.

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_SECRET_ACCESS_KEY

            -
            public static final java.lang.String S3FILEIO_SECRET_ACCESS_KEY
            -
            Configure the static secret access key used to access S3FileIO. - -

            When set, the default client factory will use the basic or session credentials provided - instead of reading the default credential chain to create S3 access credentials. If S3FILEIO_SESSION_TOKEN is set, session credential is used, otherwise basic credential is - used.

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_SESSION_TOKEN

            -
            public static final java.lang.String S3FILEIO_SESSION_TOKEN
            -
            Configure the static session token used to access S3FileIO. - -

            When set, the default client factory will use the session credentials provided instead of - reading the default credential chain to create S3 access credentials.

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_USE_ARN_REGION_ENABLED

            -
            public static final java.lang.String S3_USE_ARN_REGION_ENABLED
            -
            Enables S3FileIO to make cross-region calls to the region specified in the ARN of an - access point. - -

            By default, attempting to use an access point in a different region will throw an exception. - When enabled, this property allows using access points in other regions. - -

            For more details see: - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/services/s3/S3Configuration.html#useArnRegionEnabled--

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_USE_ARN_REGION_ENABLED_DEFAULT

            -
            public static final boolean S3_USE_ARN_REGION_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_CHECKSUM_ENABLED

            -
            public static final java.lang.String S3_CHECKSUM_ENABLED
            -
            Enables eTag checks for S3 PUT and MULTIPART upload requests.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_CHECKSUM_ENABLED_DEFAULT

            -
            public static final boolean S3_CHECKSUM_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_REMOTE_SIGNING_ENABLED

            -
            public static final java.lang.String S3_REMOTE_SIGNING_ENABLED
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_REMOTE_SIGNING_ENABLED_DEFAULT

            -
            public static final boolean S3_REMOTE_SIGNING_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_DELETE_BATCH_SIZE

            -
            public static final java.lang.String S3FILEIO_DELETE_BATCH_SIZE
            -
            Configure the batch size used when deleting multiple files from a given S3 bucket
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_DELETE_BATCH_SIZE_DEFAULT

            -
            public static final int S3FILEIO_DELETE_BATCH_SIZE_DEFAULT
            -
            Default batch size used when deleting files. - -

            Refer to https://github.com/apache/hadoop/commit/56dee667707926f3796c7757be1a133a362f05c9 - for more details on why this value was chosen.

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_DELETE_BATCH_SIZE_MAX

            -
            public static final int S3FILEIO_DELETE_BATCH_SIZE_MAX
            -
            Max possible batch size for deletion. Currently, a max of 1000 keys can be deleted in one - batch. https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            DYNAMODB_ENDPOINT

            -
            public static final java.lang.String DYNAMODB_ENDPOINT
            -
            Configure an alternative endpoint of the DynamoDB service to access.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - - - - - -
            -
          • -

            DYNAMODB_TABLE_NAME_DEFAULT

            -
            public static final java.lang.String DYNAMODB_TABLE_NAME_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - - - - - -
            -
          • -

            CLIENT_ASSUME_ROLE_ARN

            -
            public static final java.lang.String CLIENT_ASSUME_ROLE_ARN
            -
            Used by AssumeRoleAwsClientFactory. If set, all AWS clients will assume a role of the - given ARN, instead of using the default credential chain.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            CLIENT_ASSUME_ROLE_TAGS_PREFIX

            -
            public static final java.lang.String CLIENT_ASSUME_ROLE_TAGS_PREFIX
            -
            Used by AssumeRoleAwsClientFactory to pass a list of sessions. Each session tag - consists of a key name and an associated value.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            CLIENT_ASSUME_ROLE_TIMEOUT_SEC

            -
            public static final java.lang.String CLIENT_ASSUME_ROLE_TIMEOUT_SEC
            -
            Used by AssumeRoleAwsClientFactory. The timeout of the assume role session in seconds, - default to 1 hour. At the end of the timeout, a new set of role session credentials will be - fetched through a STS client.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            CLIENT_ASSUME_ROLE_TIMEOUT_SEC_DEFAULT

            -
            public static final int CLIENT_ASSUME_ROLE_TIMEOUT_SEC_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            CLIENT_ASSUME_ROLE_EXTERNAL_ID

            -
            public static final java.lang.String CLIENT_ASSUME_ROLE_EXTERNAL_ID
            -
            Used by AssumeRoleAwsClientFactory. Optional external ID used to assume an IAM role. - -

            For more details, see - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            CLIENT_ASSUME_ROLE_REGION

            -
            public static final java.lang.String CLIENT_ASSUME_ROLE_REGION
            -
            Used by AssumeRoleAwsClientFactory. If set, all AWS clients except STS client will use - the given region instead of the default region chain. - -

            The value must be one of Region, such as 'us-east-1'. - For more details, see https://docs.aws.amazon.com/general/latest/gr/rande.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            CLIENT_ASSUME_ROLE_SESSION_NAME

            -
            public static final java.lang.String CLIENT_ASSUME_ROLE_SESSION_NAME
            -
            Used by AssumeRoleAwsClientFactory. Optional session name used to assume an IAM role. - -

            For more details, see - https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            CLIENT_CREDENTIALS_PROVIDER

            -
            public static final java.lang.String CLIENT_CREDENTIALS_PROVIDER
            -
            Configure the AWS credentials provider used to create AWS clients. A fully qualified concrete - class with package that implements the AwsCredentialsProvider interface is required. - -

            Additionally, the implementation class must also have a create() or create(Map) method - implemented, which returns an instance of the class that provides aws credentials provider. - -

            Example: - client.credentials-provider=software.amazon.awssdk.auth.credentials.SystemPropertyCredentialsProvider - -

            When set, the default client factory AwsClientFactories.DefaultAwsClientFactory and also other client - factory classes will use this provider to get AWS credentials provided instead of reading the - default credential chain to get AWS access credentials.

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            CLIENT_REGION

            -
            public static final java.lang.String CLIENT_REGION
            -
            Used by AwsClientFactories.DefaultAwsClientFactory and also - other client factory classes. If set, all AWS clients except STS client will use the given - region instead of the default region chain.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_TYPE

            -
            public static final java.lang.String HTTP_CLIENT_TYPE
            -
            The type of SdkHttpClient implementation used by AwsClientFactory. If set, all AWS clients will use the specified HTTP client. If not set, - HTTP_CLIENT_TYPE_DEFAULT will be used. For the specific types supported, see - HTTP_CLIENT_TYPE_* defined below.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - - - - - - - - - -
            -
          • -

            HTTP_CLIENT_TYPE_DEFAULT

            -
            public static final java.lang.String HTTP_CLIENT_TYPE_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_URLCONNECTION_CONNECTION_TIMEOUT_MS

            -
            public static final java.lang.String HTTP_CLIENT_URLCONNECTION_CONNECTION_TIMEOUT_MS
            -
            Used to configure the connection timeout in milliseconds for UrlConnectionHttpClient.Builder. This flag only - works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_URLCONNECTION - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/urlconnection/UrlConnectionHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_URLCONNECTION_SOCKET_TIMEOUT_MS

            -
            public static final java.lang.String HTTP_CLIENT_URLCONNECTION_SOCKET_TIMEOUT_MS
            -
            Used to configure the socket timeout in milliseconds for UrlConnectionHttpClient.Builder. This flag only - works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_URLCONNECTION - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/urlconnection/UrlConnectionHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_APACHE_CONNECTION_TIMEOUT_MS

            -
            public static final java.lang.String HTTP_CLIENT_APACHE_CONNECTION_TIMEOUT_MS
            -
            Used to configure the connection timeout in milliseconds for ApacheHttpClient.Builder. This flag only works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_APACHE - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_APACHE_SOCKET_TIMEOUT_MS

            -
            public static final java.lang.String HTTP_CLIENT_APACHE_SOCKET_TIMEOUT_MS
            -
            Used to configure the socket timeout in milliseconds for ApacheHttpClient.Builder. This flag only works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_APACHE - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_APACHE_CONNECTION_ACQUISITION_TIMEOUT_MS

            -
            public static final java.lang.String HTTP_CLIENT_APACHE_CONNECTION_ACQUISITION_TIMEOUT_MS
            -
            Used to configure the connection acquisition timeout in milliseconds for ApacheHttpClient.Builder. This flag only works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_APACHE - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_APACHE_CONNECTION_MAX_IDLE_TIME_MS

            -
            public static final java.lang.String HTTP_CLIENT_APACHE_CONNECTION_MAX_IDLE_TIME_MS
            -
            Used to configure the connection max idle time in milliseconds for ApacheHttpClient.Builder. This flag only works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_APACHE - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_APACHE_CONNECTION_TIME_TO_LIVE_MS

            -
            public static final java.lang.String HTTP_CLIENT_APACHE_CONNECTION_TIME_TO_LIVE_MS
            -
            Used to configure the connection time to live in milliseconds for ApacheHttpClient.Builder. This flag only works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_APACHE - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_APACHE_EXPECT_CONTINUE_ENABLED

            -
            public static final java.lang.String HTTP_CLIENT_APACHE_EXPECT_CONTINUE_ENABLED
            -
            Used to configure whether to enable the expect continue setting for ApacheHttpClient.Builder. This flag only works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_APACHE - -

            By default, this is disabled. - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_APACHE_MAX_CONNECTIONS

            -
            public static final java.lang.String HTTP_CLIENT_APACHE_MAX_CONNECTIONS
            -
            Used to configure the max connections number for ApacheHttpClient.Builder. This flag only works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_APACHE - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_APACHE_TCP_KEEP_ALIVE_ENABLED

            -
            public static final java.lang.String HTTP_CLIENT_APACHE_TCP_KEEP_ALIVE_ENABLED
            -
            Used to configure whether to enable the tcp keep alive setting for ApacheHttpClient.Builder. This flag only works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_APACHE. - -

            By default, this is disabled. - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            HTTP_CLIENT_APACHE_USE_IDLE_CONNECTION_REAPER_ENABLED

            -
            public static final java.lang.String HTTP_CLIENT_APACHE_USE_IDLE_CONNECTION_REAPER_ENABLED
            -
            Used to configure whether to use idle connection reaper for ApacheHttpClient.Builder. This flag only works when HTTP_CLIENT_TYPE is set to HTTP_CLIENT_TYPE_APACHE. - -

            By default, this is enabled. - -

            For more details, see - https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_WRITE_TAGS_PREFIX

            -
            public static final java.lang.String S3_WRITE_TAGS_PREFIX
            -
            Used by S3FileIO to tag objects when writing. To set, we can pass a catalog property. - -

            For more details, see - https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html - -

            Example: s3.write.tags.my_key=my_val

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_WRITE_TABLE_TAG_ENABLED

            -
            public static final java.lang.String S3_WRITE_TABLE_TAG_ENABLED
            -
            Used by GlueCatalog to tag objects when writing. To set, we can pass a catalog - property. - -

            For more details, see - https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html - -

            Example: s3.write.table-tag-enabled=true

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_WRITE_TABLE_TAG_ENABLED_DEFAULT

            -
            public static final boolean S3_WRITE_TABLE_TAG_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_WRITE_NAMESPACE_TAG_ENABLED

            -
            public static final java.lang.String S3_WRITE_NAMESPACE_TAG_ENABLED
            -
            Used by GlueCatalog to tag objects when writing. To set, we can pass a catalog - property. - -

            For more details, see - https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html - -

            Example: s3.write.namespace-tag-enabled=true

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_WRITE_NAMESPACE_TAG_ENABLED_DEFAULT

            -
            public static final boolean S3_WRITE_NAMESPACE_TAG_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - - - - - - - - - -
            -
          • -

            S3_DELETE_TAGS_PREFIX

            -
            public static final java.lang.String S3_DELETE_TAGS_PREFIX
            -
            Used by S3FileIO to tag objects when deleting. When this config is set, objects are - tagged with the configured key-value pairs before deletion. This is considered a soft-delete, - because users are able to configure tag-based object lifecycle policy at bucket level to - transition objects to different tiers. - -

            For more details, see - https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html - -

            Example: s3.delete.tags.my_key=my_val

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3FILEIO_DELETE_THREADS

            -
            public static final java.lang.String S3FILEIO_DELETE_THREADS
            -
            Number of threads to use for adding delete tags to S3 objects, default to Runtime.availableProcessors()
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_DELETE_ENABLED

            -
            public static final java.lang.String S3_DELETE_ENABLED
            -
            Determines if S3FileIO deletes the object when io.delete() is called, default to true. - Once disabled, users are expected to set tags through S3_DELETE_TAGS_PREFIX and manage - deleted files through S3 lifecycle policy.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_DELETE_ENABLED_DEFAULT

            -
            public static final boolean S3_DELETE_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_ACCELERATION_ENABLED

            -
            public static final java.lang.String S3_ACCELERATION_ENABLED
            -
            Determines if S3 client will use the Acceleration Mode, default to false. - -

            For more details, see - https://docs.aws.amazon.com/AmazonS3/latest/userguide/transfer-acceleration.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_ACCELERATION_ENABLED_DEFAULT

            -
            public static final boolean S3_ACCELERATION_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_DUALSTACK_ENABLED

            -
            public static final java.lang.String S3_DUALSTACK_ENABLED
            -
            Determines if S3 client will use the Dualstack Mode, default to false. - -

            For more details, see - https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_DUALSTACK_ENABLED_DEFAULT

            -
            public static final boolean S3_DUALSTACK_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_ACCESS_POINTS_PREFIX

            -
            public static final java.lang.String S3_ACCESS_POINTS_PREFIX
            -
            Used by S3FileIO, prefix used for bucket access point configuration. To set, we can - pass a catalog property. - -

            For more details, see https://aws.amazon.com/s3/features/access-points/ - -

            Example: s3.access-points.my-bucket=access-point

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_PRELOAD_CLIENT_ENABLED

            -
            public static final java.lang.String S3_PRELOAD_CLIENT_ENABLED
            -
            This flag controls whether the S3 client will be initialized during the S3FileIO - initialization, instead of the default lazy initialization upon use. This is needed for cases where - the credentials to use might change and need to be preloaded.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            S3_PRELOAD_CLIENT_ENABLED_DEFAULT

            -
            public static final boolean S3_PRELOAD_CLIENT_ENABLED_DEFAULT
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - - - - - - - - - -
            -
          • -

            REST_SIGNER_REGION

            -
            public static final java.lang.String REST_SIGNER_REGION
            -
            Region to be used by the SigV4 protocol for signing requests.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            REST_SIGNING_NAME

            -
            public static final java.lang.String REST_SIGNING_NAME
            -
            The service name to be used by the SigV4 protocol for signing requests.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            REST_SIGNING_NAME_DEFAULT

            -
            public static final java.lang.String REST_SIGNING_NAME_DEFAULT
            -
            The default service name (API Gateway and lambda) used during SigV4 signing.
            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            REST_ACCESS_KEY_ID

            -
            public static final java.lang.String REST_ACCESS_KEY_ID
            -
            Configure the static access key ID used for SigV4 signing. - -

            When set, the default client factory will use the basic or session credentials provided - instead of reading the default credential chain to create S3 access credentials. If REST_SESSION_TOKEN is set, session credential is used, otherwise basic credential is used.

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            REST_SECRET_ACCESS_KEY

            -
            public static final java.lang.String REST_SECRET_ACCESS_KEY
            -
            Configure the static secret access key used for SigV4 signing. - -

            When set, the default client factory will use the basic or session credentials provided - instead of reading the default credential chain to create S3 access credentials. If REST_SESSION_TOKEN is set, session credential is used, otherwise basic credential is used.

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          - - - -
            -
          • -

            REST_SESSION_TOKEN

            -
            public static final java.lang.String REST_SESSION_TOKEN
            -
            Configure the static session token used for SigV4. - -

            When set, the default client factory will use the session credentials provided instead of - reading the default credential chain to create access credentials.

            -
            -
            See Also:
            -
            Constant Field Values
            -
            -
          • -
          -
        • -
        - -
          -
        • - - -

          Constructor Detail

          - - - + +void +setGlueLakeFormationEnabled(boolean glueLakeFormationEnabled)  + + +java.util.Set<software.amazon.awssdk.services.sts.model.Tag> +stsClientAssumeRoleTags()  + +
            -
          • -

            AwsProperties

            -
            public AwsProperties()
            -
          • -
          - +
        • -
            -
          • -

            AwsProperties

            -
            public AwsProperties(java.util.Map<java.lang.String,java.lang.String> properties)
            -
          • -
          -
        • +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        - -
          -
        • - - -

          Method Detail

          - - - -
            -
          • -

            stsClientAssumeRoleTags

            -
            public java.util.Set<software.amazon.awssdk.services.sts.model.Tag> stsClientAssumeRoleTags()
          - - - -
            -
          • -

            clientAssumeRoleArn

            -
            public java.lang.String clientAssumeRoleArn()
          - - - +
      +
      • -

        clientAssumeRoleTimeoutSec

        -
        public int clientAssumeRoleTimeoutSec()
        -
      • -
      - - - +
        -
      • -

        clientAssumeRoleExternalId

        -
        public java.lang.String clientAssumeRoleExternalId()
        -
      • -
      - +
    • -
        -
      • -

        clientAssumeRoleRegion

        -
        public java.lang.String clientAssumeRoleRegion()
        -
      • -
      - +

      Field Detail

      +
      • -

        clientAssumeRoleSessionName

        -
        public java.lang.String clientAssumeRoleSessionName()
        +

        GLUE_CATALOG_ID

        +
        public static final java.lang.String GLUE_CATALOG_ID
        +
        The ID of the Glue Data Catalog where the tables reside. If none is provided, Glue + automatically uses the caller's AWS account ID by default. + +

        For more details, see + https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-databases.html

        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        s3FileIoSseType

        -
        public java.lang.String s3FileIoSseType()
        +

        GLUE_ACCOUNT_ID

        +
        public static final java.lang.String GLUE_ACCOUNT_ID
        +
        The account ID used in a Glue resource ARN, e.g. + arn:aws:glue:us-east-1:1000000000000:table/db1/table1
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setS3FileIoSseType

        -
        public void setS3FileIoSseType(java.lang.String sseType)
        +

        GLUE_CATALOG_SKIP_ARCHIVE

        +
        public static final java.lang.String GLUE_CATALOG_SKIP_ARCHIVE
        +
If Glue should skip archiving an old table version when creating a new version in a commit. By default, Glue archives all old table versions after an UpdateTable call, but Glue has a default maximum number of archived table versions (which can be increased). So for streaming use cases with many commits, it is recommended to set this value to true.
        +
        +
        See Also:
        +
        Constant Field Values
        +
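A minimal configuration sketch (only the AwsProperties constants come from this class; the variable names are illustrative), using the AwsProperties(Map) constructor documented in the Constructor Detail section:

    Map<String, String> properties = new HashMap<>();   // java.util.Map / java.util.HashMap
    properties.put(AwsProperties.GLUE_CATALOG_SKIP_ARCHIVE, "true");
    AwsProperties awsProperties = new AwsProperties(properties);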
      - +
      • -

        s3FileIoSseKey

        -
        public java.lang.String s3FileIoSseKey()
        +

        GLUE_CATALOG_SKIP_ARCHIVE_DEFAULT

        +
        public static final boolean GLUE_CATALOG_SKIP_ARCHIVE_DEFAULT
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        s3FileIoDeleteBatchSize

        -
        public int s3FileIoDeleteBatchSize()
        +

        GLUE_CATALOG_SKIP_NAME_VALIDATION

        +
        public static final java.lang.String GLUE_CATALOG_SKIP_NAME_VALIDATION
        +
If Glue should skip name validations. It is recommended to stick to the Glue best practices in https://docs.aws.amazon.com/athena/latest/ug/glue-best-practices.html to make sure operations are Hive compatible. This is only added for users that have existing conventions using non-standard characters. When database name and table name validation are skipped, there is no guarantee that downstream systems will all support the names.
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setS3FileIoDeleteBatchSize

        -
        public void setS3FileIoDeleteBatchSize(int deleteBatchSize)
        +

        GLUE_CATALOG_SKIP_NAME_VALIDATION_DEFAULT

        +
        public static final boolean GLUE_CATALOG_SKIP_NAME_VALIDATION_DEFAULT
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setS3FileIoSseKey

        -
        public void setS3FileIoSseKey(java.lang.String sseKey)
        +

        GLUE_LAKEFORMATION_ENABLED

        +
        public static final java.lang.String GLUE_LAKEFORMATION_ENABLED
        +
        If set, GlueCatalog will use Lake Formation for access control. For more credential vending + details, see: https://docs.aws.amazon.com/lake-formation/latest/dg/api-overview.html. If + enabled, the AwsClientFactory implementation must be LakeFormationAwsClientFactory or any class that extends it.
        +
        +
        See Also:
        +
        Constant Field Values
        +
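A hedged sketch of enabling Lake Formation support together with a compatible factory via the CLIENT_FACTORY property described further below; the fully qualified class name of LakeFormationAwsClientFactory is assumed and should be verified:

    properties.put(AwsProperties.GLUE_LAKEFORMATION_ENABLED, "true");
    // assumed fully qualified name of the factory referenced above
    properties.put(AwsProperties.CLIENT_FACTORY,
        "org.apache.iceberg.aws.lakeformation.LakeFormationAwsClientFactory");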
      - +
      • -

        s3FileIoSseMd5

        -
        public java.lang.String s3FileIoSseMd5()
        +

        GLUE_LAKEFORMATION_ENABLED_DEFAULT

        +
        public static final boolean GLUE_LAKEFORMATION_ENABLED_DEFAULT
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setS3FileIoSseMd5

        -
        public void setS3FileIoSseMd5(java.lang.String sseMd5)
        +

        GLUE_CATALOG_ENDPOINT

        +
        public static final java.lang.String GLUE_CATALOG_ENDPOINT
        +
        Configure an alternative endpoint of the Glue service for GlueCatalog to access. + +

This can be used to point GlueCatalog at any Glue-compatible metastore service that exposes a different endpoint.

        +
        +
        See Also:
        +
        Constant Field Values
        +
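A brief sketch, with a placeholder endpoint URL, of pointing GlueCatalog at an alternative Glue-compatible service:

    properties.put(AwsProperties.GLUE_CATALOG_ENDPOINT, "https://glue.example.internal");  // placeholder endpoint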
      - +
      • -

        glueCatalogId

        -
        public java.lang.String glueCatalogId()
        +

        DYNAMODB_ENDPOINT

        +
        public static final java.lang.String DYNAMODB_ENDPOINT
        +
        Configure an alternative endpoint of the DynamoDB service to access.
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setGlueCatalogId

        -
        public void setGlueCatalogId(java.lang.String id)
        +

        DYNAMODB_TABLE_NAME

        +
        public static final java.lang.String DYNAMODB_TABLE_NAME
        +
        DynamoDB table name for DynamoDbCatalog
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        glueCatalogSkipArchive

        -
        public boolean glueCatalogSkipArchive()
        +

        DYNAMODB_TABLE_NAME_DEFAULT

        +
        public static final java.lang.String DYNAMODB_TABLE_NAME_DEFAULT
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setGlueCatalogSkipArchive

        -
        public void setGlueCatalogSkipArchive(boolean skipArchive)
        +

        CLIENT_FACTORY

        +
        public static final java.lang.String CLIENT_FACTORY
        +
The implementation class of AwsClientFactory to customize AWS client configurations. If set, all AWS clients will be initialized by the specified factory. If not set, AwsClientFactories.defaultFactory() is used as the default factory.
        +
        +
        See Also:
        +
        Constant Field Values
        +
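A one-line sketch of selecting a custom factory; the class name is hypothetical and must implement AwsClientFactory:

    properties.put(AwsProperties.CLIENT_FACTORY, "com.example.MyAwsClientFactory");  // hypothetical implementation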
      - +
      • -

        glueCatalogSkipNameValidation

        -
        public boolean glueCatalogSkipNameValidation()
        +

        CLIENT_ASSUME_ROLE_ARN

        +
        public static final java.lang.String CLIENT_ASSUME_ROLE_ARN
        +
        Used by AssumeRoleAwsClientFactory. If set, all AWS clients will assume a role of the + given ARN, instead of using the default credential chain.
        +
        +
        See Also:
        +
        Constant Field Values
        +
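A short sketch combining this property with the assume-role region described further below; the role ARN is a placeholder:

    properties.put(AwsProperties.CLIENT_ASSUME_ROLE_ARN,
        "arn:aws:iam::123456789012:role/iceberg-access");              // placeholder ARN
    properties.put(AwsProperties.CLIENT_ASSUME_ROLE_REGION, "us-east-1");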
      - +
      • -

        setGlueCatalogSkipNameValidation

        -
        public void setGlueCatalogSkipNameValidation(boolean glueCatalogSkipNameValidation)
        +

        CLIENT_ASSUME_ROLE_TAGS_PREFIX

        +
        public static final java.lang.String CLIENT_ASSUME_ROLE_TAGS_PREFIX
        +
Used by AssumeRoleAwsClientFactory to pass a list of session tags. Each session tag consists of a key name and an associated value.
        +
        +
        See Also:
        +
        Constant Field Values
        +
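A hedged sketch, assuming each tag is supplied as a property whose key is this prefix followed by the tag key and whose value is the tag value:

    properties.put(AwsProperties.CLIENT_ASSUME_ROLE_TAGS_PREFIX + "team", "data-platform");  // assumed key layout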
      - +
      • -

        glueLakeFormationEnabled

        -
        public boolean glueLakeFormationEnabled()
        +

        CLIENT_ASSUME_ROLE_TIMEOUT_SEC

        +
        public static final java.lang.String CLIENT_ASSUME_ROLE_TIMEOUT_SEC
        +
Used by AssumeRoleAwsClientFactory. The timeout of the assume role session in seconds, defaulting to 1 hour. At the end of the timeout, a new set of role session credentials will be fetched through an STS client.
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setGlueLakeFormationEnabled

        -
        public void setGlueLakeFormationEnabled(boolean glueLakeFormationEnabled)
        +

        CLIENT_ASSUME_ROLE_TIMEOUT_SEC_DEFAULT

        +
        public static final int CLIENT_ASSUME_ROLE_TIMEOUT_SEC_DEFAULT
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        s3FileIoMultipartUploadThreads

        -
        public int s3FileIoMultipartUploadThreads()
        +

        CLIENT_ASSUME_ROLE_EXTERNAL_ID

        +
        public static final java.lang.String CLIENT_ASSUME_ROLE_EXTERNAL_ID
        +
        Used by AssumeRoleAwsClientFactory. Optional external ID used to assume an IAM role. + +

        For more details, see + https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html

        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setS3FileIoMultipartUploadThreads

        -
        public void setS3FileIoMultipartUploadThreads(int threads)
        +

        CLIENT_ASSUME_ROLE_REGION

        +
        public static final java.lang.String CLIENT_ASSUME_ROLE_REGION
        +
Used by AssumeRoleAwsClientFactory. If set, all AWS clients except the STS client will use the given region instead of the default region chain.

The value must be one of the region IDs defined by software.amazon.awssdk.regions.Region, such as 'us-east-1'. For more details, see https://docs.aws.amazon.com/general/latest/gr/rande.html

        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        s3FileIoMultiPartSize

        -
        public int s3FileIoMultiPartSize()
        +

        CLIENT_ASSUME_ROLE_SESSION_NAME

        +
        public static final java.lang.String CLIENT_ASSUME_ROLE_SESSION_NAME
        +
        Used by AssumeRoleAwsClientFactory. Optional session name used to assume an IAM role. + +

        For more details, see + https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname

        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setS3FileIoMultiPartSize

        -
        public void setS3FileIoMultiPartSize(int size)
        +

        LAKE_FORMATION_TABLE_NAME

        +
        public static final java.lang.String LAKE_FORMATION_TABLE_NAME
        +
Used by LakeFormationAwsClientFactory. The table name used as part of the Lake Formation credentials request.
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        s3FileIOMultipartThresholdFactor

        -
        public double s3FileIOMultipartThresholdFactor()
        +

        LAKE_FORMATION_DB_NAME

        +
        public static final java.lang.String LAKE_FORMATION_DB_NAME
        +
Used by LakeFormationAwsClientFactory. The database name used as part of the Lake Formation credentials request.
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setS3FileIoMultipartThresholdFactor

        -
        public void setS3FileIoMultipartThresholdFactor(double factor)
        +

        REST_SIGNER_REGION

        +
        public static final java.lang.String REST_SIGNER_REGION
        +
        Region to be used by the SigV4 protocol for signing requests.
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        s3fileIoStagingDirectory

        -
        public java.lang.String s3fileIoStagingDirectory()
        +

        REST_SIGNING_NAME

        +
        public static final java.lang.String REST_SIGNING_NAME
        +
        The service name to be used by the SigV4 protocol for signing requests.
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setS3fileIoStagingDirectory

        -
        public void setS3fileIoStagingDirectory(java.lang.String directory)
        +

        REST_SIGNING_NAME_DEFAULT

        +
        public static final java.lang.String REST_SIGNING_NAME_DEFAULT
        +
The default service name (API Gateway and Lambda) used during SigV4 signing.
        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        s3FileIoAcl

        -
        public software.amazon.awssdk.services.s3.model.ObjectCannedACL s3FileIoAcl()
        +

        REST_ACCESS_KEY_ID

        +
        public static final java.lang.String REST_ACCESS_KEY_ID
        +
        Configure the static access key ID used for SigV4 signing. + +

When set, the default client factory will use the basic or session credentials provided instead of reading the default credential chain to create S3 access credentials. If REST_SESSION_TOKEN is set, session credentials are used; otherwise, basic credentials are used.

        +
        +
        See Also:
        +
        Constant Field Values
        +
      - +
      • -

        setS3FileIoAcl

        -
        public void setS3FileIoAcl(software.amazon.awssdk.services.s3.model.ObjectCannedACL acl)
        +

        REST_SECRET_ACCESS_KEY

        +
        public static final java.lang.String REST_SECRET_ACCESS_KEY
        +
        Configure the static secret access key used for SigV4 signing. + +

When set, the default client factory will use the basic or session credentials provided instead of reading the default credential chain to create S3 access credentials. If REST_SESSION_TOKEN is set, session credentials are used; otherwise, basic credentials are used.

        +
        +
        See Also:
        +
        Constant Field Values
        +
      - + -
        +
        • -

          setS3PreloadClientEnabled

          -
          public void setS3PreloadClientEnabled(boolean s3PreloadClientEnabled)
          +

          REST_SESSION_TOKEN

          +
          public static final java.lang.String REST_SESSION_TOKEN
          +
          Configure the static session token used for SigV4. + +

          When set, the default client factory will use the session credentials provided instead of + reading the default credential chain to create access credentials.

          +
          +
          See Also:
          +
          Constant Field Values
          +
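A combined sketch of the SigV4 signing properties above; all credential values are placeholders:

    properties.put(AwsProperties.REST_SIGNER_REGION, "us-west-2");
    properties.put(AwsProperties.REST_ACCESS_KEY_ID, "placeholder-access-key-id");
    properties.put(AwsProperties.REST_SECRET_ACCESS_KEY, "placeholder-secret-key");
    properties.put(AwsProperties.REST_SESSION_TOKEN, "placeholder-session-token");  // enables session credentials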
        - - - -
          -
        • -

          s3PreloadClientEnabled

          -
          public boolean s3PreloadClientEnabled()
        - + +
          +
        • -
            -
          • -

            dynamoDbTableName

            -
            public java.lang.String dynamoDbTableName()
            -
          • -
          - +

          Constructor Detail

          +
          • -

            setDynamoDbTableName

            -
            public void setDynamoDbTableName(java.lang.String name)
            +

            AwsProperties

            +
            public AwsProperties()
          - + -
            +
            • -

              isS3ChecksumEnabled

              -
              public boolean isS3ChecksumEnabled()
              +

              AwsProperties

              +
              public AwsProperties(java.util.Map<java.lang.String,java.lang.String> properties)
            - - - -
              -
            • -

              setS3ChecksumEnabled

              -
              public void setS3ChecksumEnabled(boolean eTagCheckEnabled)
            - + +
              +
            • -
                -
              • -

                s3WriteTags

                -
                public java.util.Set<software.amazon.awssdk.services.s3.model.Tag> s3WriteTags()
                -
              • -
              - +

              Method Detail

              +
              • -

                s3WriteTableTagEnabled

                -
                public boolean s3WriteTableTagEnabled()
                +

                stsClientAssumeRoleTags

                +
                public java.util.Set<software.amazon.awssdk.services.sts.model.Tag> stsClientAssumeRoleTags()
              - +
              • -

                setS3WriteTableTagEnabled

                -
                public void setS3WriteTableTagEnabled(boolean s3WriteTableNameTagEnabled)
                +

                clientAssumeRoleArn

                +
                public java.lang.String clientAssumeRoleArn()
              - +
              • -

                s3WriteNamespaceTagEnabled

                -
                public boolean s3WriteNamespaceTagEnabled()
                +

                clientAssumeRoleTimeoutSec

                +
                public int clientAssumeRoleTimeoutSec()
              - +
              • -

                setS3WriteNamespaceTagEnabled

                -
                public void setS3WriteNamespaceTagEnabled(boolean s3WriteNamespaceTagEnabled)
                +

                clientAssumeRoleExternalId

                +
                public java.lang.String clientAssumeRoleExternalId()
              - +
              • -

                s3DeleteTags

                -
                public java.util.Set<software.amazon.awssdk.services.s3.model.Tag> s3DeleteTags()
                +

                clientAssumeRoleRegion

                +
                public java.lang.String clientAssumeRoleRegion()
              - +
              • -

                s3FileIoDeleteThreads

                -
                public int s3FileIoDeleteThreads()
                +

                clientAssumeRoleSessionName

                +
                public java.lang.String clientAssumeRoleSessionName()
              - +
              • -

                setS3FileIoDeleteThreads

                -
                public void setS3FileIoDeleteThreads(int threads)
                +

                glueCatalogId

                +
                public java.lang.String glueCatalogId()
              - +
              • -

                isS3DeleteEnabled

                -
                public boolean isS3DeleteEnabled()
                +

                setGlueCatalogId

                +
                public void setGlueCatalogId(java.lang.String id)
              - +
              • -

                setS3DeleteEnabled

                -
                public void setS3DeleteEnabled(boolean s3DeleteEnabled)
                +

                glueCatalogSkipArchive

                +
                public boolean glueCatalogSkipArchive()
              - +
              • -

                s3BucketToAccessPointMapping

                -
                public java.util.Map<java.lang.String,java.lang.String> s3BucketToAccessPointMapping()
                +

                setGlueCatalogSkipArchive

                +
                public void setGlueCatalogSkipArchive(boolean skipArchive)
              - +
              • -

                httpClientProperties

                -
                public java.util.Map<java.lang.String,java.lang.String> httpClientProperties()
                +

                glueCatalogSkipNameValidation

                +
                public boolean glueCatalogSkipNameValidation()
              - +
              • -

                clientRegion

                -
                public java.lang.String clientRegion()
                +

                setGlueCatalogSkipNameValidation

                +
                public void setGlueCatalogSkipNameValidation(boolean glueCatalogSkipNameValidation)
              - +
              • -

                setClientRegion

                -
                public void setClientRegion(java.lang.String clientRegion)
                +

                glueLakeFormationEnabled

                +
                public boolean glueLakeFormationEnabled()
              - - - +
              • -

                applyS3CredentialConfigurations

                -
                public <T extends software.amazon.awssdk.services.s3.S3ClientBuilder> void applyS3CredentialConfigurations(T builder)
                -
                Configure the credentials for an S3 client. - -

                Sample usage: - -

                -     S3Client.builder().applyMutation(awsProperties::applyS3CredentialConfigurations)
                - 
                +

                setGlueLakeFormationEnabled

                +
                public void setGlueLakeFormationEnabled(boolean glueLakeFormationEnabled)
              - - - +
              • -

                applyClientRegionConfiguration

                -
                public <T extends software.amazon.awssdk.awscore.client.builder.AwsClientBuilder> void applyClientRegionConfiguration(T builder)
                -
                Configure a client AWS region. - -

                Sample usage: - -

                -     S3Client.builder().applyMutation(awsProperties::applyClientRegionConfiguration)
                - 
                +

                dynamoDbTableName

                +
                public java.lang.String dynamoDbTableName()
              - - - +
              • -

                applyClientCredentialConfigurations

                -
                public <T extends software.amazon.awssdk.awscore.client.builder.AwsClientBuilder> void applyClientCredentialConfigurations(T builder)
                -
                Configure the credential provider for AWS clients. - -

                Sample usage: - -

                -     DynamoDbClient.builder().applyMutation(awsProperties::applyClientCredentialConfigurations)
                - 
                +

                setDynamoDbTableName

                +
                public void setDynamoDbTableName(java.lang.String name)
              - - - +
              • -

                applyS3ServiceConfigurations

                -
                public <T extends software.amazon.awssdk.services.s3.S3ClientBuilder> void applyS3ServiceConfigurations(T builder)
                -
                Configure services settings for an S3 client. The settings include: s3DualStack, - s3UseArnRegion, s3PathStyleAccess, and s3Acceleration - -

                Sample usage: - -

                -     S3Client.builder().applyMutation(awsProperties::applyS3ServiceConfigurations)
                - 
                +

                httpClientProperties

                +
                @Deprecated
                +public java.util.Map<java.lang.String,java.lang.String> httpClientProperties()
                +
                Deprecated. will be removed in 1.5.0, use HttpClientProperties instead
              - - - +
              • -

                applyS3SignerConfiguration

                -
                public <T extends software.amazon.awssdk.services.s3.S3ClientBuilder> void applyS3SignerConfiguration(T builder)
                -
                Configure a signer for an S3 client. - -

                Sample usage: - -

                -     S3Client.builder().applyMutation(awsProperties::applyS3SignerConfiguration)
                - 
                +

                clientRegion

                +
                @Deprecated
                +public java.lang.String clientRegion()
                +
                Deprecated. will be removed in 1.5.0, use AwsClientProperties.clientRegion() instead
              - - - +
              • -

                applyHttpClientConfigurations

                -
                public <T extends software.amazon.awssdk.awscore.client.builder.AwsSyncClientBuilder> void applyHttpClientConfigurations(T builder)
                -
                Configure the httpClient for a client according to the HttpClientType. The two supported - HttpClientTypes are urlconnection and apache - -

                Sample usage: - -

                -     S3Client.builder().applyMutation(awsProperties::applyHttpClientConfigurations)
                - 
                +

                setClientRegion

                +
                @Deprecated
                +public void setClientRegion(java.lang.String clientRegion)
                +
                Deprecated. will be removed in 1.5.0, use AwsClientProperties.setClientRegion(String) + instead
              - + - +
              • -

                applyS3EndpointConfigurations

                -
                public <T extends software.amazon.awssdk.services.s3.S3ClientBuilder> void applyS3EndpointConfigurations(T builder)
                -
                Override the endpoint for an S3 client. - -

                Sample usage: - -

                -     S3Client.builder().applyMutation(awsProperties::applyS3EndpointConfigurations)
                - 
                +

                applyClientCredentialConfigurations

                +
                @Deprecated
                +public <T extends software.amazon.awssdk.awscore.client.builder.AwsClientBuilder> void applyClientCredentialConfigurations(T builder)
                +
                Deprecated. will be removed in 1.5.0, use AwsClientProperties.applyClientCredentialConfigurations(AwsClientBuilder) instead
              @@ -3136,7 +1161,7 @@

              applyDynamoDbEndpointConfigurations

              Sample usage:

              -     DynamoDbClient.builder().applyMutation(awsProperties::applyS3EndpointConfigurations)
              +     DynamoDbClient.builder().applyMutation(awsProperties::applyDynamoDbEndpointConfigurations)
                
    @@ -3194,8 +1219,8 @@

    restCredentialsProvider

    diff --git a/javadoc/org/apache/iceberg/aws/package-summary.html b/javadoc/org/apache/iceberg/aws/package-summary.html index 58d1ed583..9da160ca0 100644 --- a/javadoc/org/apache/iceberg/aws/package-summary.html +++ b/javadoc/org/apache/iceberg/aws/package-summary.html @@ -106,9 +106,17 @@

    Package org.apache.iceberg.aws

      +AwsClientProperties +  + + AwsProperties   + +HttpClientProperties +  + RESTSigV4Signer @@ -116,6 +124,10 @@

    Package org.apache.iceberg.aws

    for the SigV4 protocol and adds the necessary headers for all requests created by the client. + +S3FileIOAwsClientFactories +  +
  • diff --git a/javadoc/org/apache/iceberg/aws/package-tree.html b/javadoc/org/apache/iceberg/aws/package-tree.html index a4212e06e..13baf2351 100644 --- a/javadoc/org/apache/iceberg/aws/package-tree.html +++ b/javadoc/org/apache/iceberg/aws/package-tree.html @@ -81,8 +81,11 @@

    Class Hierarchy

    diff --git a/javadoc/org/apache/iceberg/aws/s3/S3FileIO.html b/javadoc/org/apache/iceberg/aws/s3/S3FileIO.html index e94e1a047..dac449a50 100644 --- a/javadoc/org/apache/iceberg/aws/s3/S3FileIO.html +++ b/javadoc/org/apache/iceberg/aws/s3/S3FileIO.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -48,7 +48,7 @@ @@ -117,7 +118,7 @@

    Interface Hierarchy

    @@ -132,19 +132,37 @@

    Nested Class Summary

    - +

    Classes

      +
    • AesGcmInputFile
    • +
    • AesGcmInputStream
    • +
    • AesGcmOutputFile
    • +
    • AesGcmOutputStream
    • Ciphers
    • Ciphers.AesGcmDecryptor
    • Ciphers.AesGcmEncryptor
    • diff --git a/javadoc/org/apache/iceberg/encryption/package-summary.html b/javadoc/org/apache/iceberg/encryption/package-summary.html index ed1a6bffb..dd57a6a15 100644 --- a/javadoc/org/apache/iceberg/encryption/package-summary.html +++ b/javadoc/org/apache/iceberg/encryption/package-summary.html @@ -132,6 +132,22 @@

      Package org.apache.iceberg.encryption

      +AesGcmInputFile +  + + +AesGcmInputStream +  + + +AesGcmOutputFile +  + + +AesGcmOutputStream +  + + Ciphers   diff --git a/javadoc/org/apache/iceberg/encryption/package-tree.html b/javadoc/org/apache/iceberg/encryption/package-tree.html index 1a65c3f72..6e71ef214 100644 --- a/javadoc/org/apache/iceberg/encryption/package-tree.html +++ b/javadoc/org/apache/iceberg/encryption/package-tree.html @@ -79,15 +79,35 @@

      Class Hierarchy

      • java.lang.Object
      • diff --git a/javadoc/org/apache/iceberg/exceptions/BadRequestException.html b/javadoc/org/apache/iceberg/exceptions/BadRequestException.html index b1ce15f1b..78ac032d8 100644 --- a/javadoc/org/apache/iceberg/exceptions/BadRequestException.html +++ b/javadoc/org/apache/iceberg/exceptions/BadRequestException.html @@ -116,12 +116,13 @@

        Class BadRequestException
        All Implemented Interfaces:
        -
        java.io.Serializable
        +
        java.io.Serializable, CleanableFailure


        public class BadRequestException
        -extends java.lang.RuntimeException
        +extends java.lang.RuntimeException +implements CleanableFailure
        Exception thrown on HTTP 400 - Bad Request
        See Also:
        diff --git a/javadoc/org/apache/iceberg/exceptions/CherrypickAncestorCommitException.html b/javadoc/org/apache/iceberg/exceptions/CherrypickAncestorCommitException.html index 826908d69..bb576a905 100644 --- a/javadoc/org/apache/iceberg/exceptions/CherrypickAncestorCommitException.html +++ b/javadoc/org/apache/iceberg/exceptions/CherrypickAncestorCommitException.html @@ -42,7 +42,7 @@

      + + + +
        +
      • +

        toSanitizedString

        +
        public static java.lang.String toSanitizedString(Types.StructType struct,
        +                                                 Expression expr,
        +                                                 boolean caseSensitive)
        +
        Produces a sanitized expression string with the same structure, but with data values replaced + by descriptions. + +

        Numbers are replaced with magnitude and type, string-like values are replaced by hashes, and + date/time values are replaced by the type.

        +
        +
        Parameters:
        +
        struct - a StructType to bind the expression
        +
        expr - an Expression to sanitize
        +
        caseSensitive - whether to bind case sensitively
        +
        Returns:
        +
        a sanitized expression string
        +
        +
      • +
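A usage sketch; the enclosing class is assumed to be ExpressionUtil (the class whose selectsPartitions method appears below), and table is assumed to be an Iceberg Table:

    Expression filter = Expressions.equal("email", "user@example.com");
    String safe = ExpressionUtil.toSanitizedString(table.schema().asStruct(), filter, true);
    // "safe" describes the literal (for string values, a hash) instead of exposing it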
      @@ -341,12 +415,30 @@

      selectsPartitions

      - diff --git a/javadoc/org/apache/iceberg/flink/CatalogLoader.CustomCatalogLoader.html b/javadoc/org/apache/iceberg/flink/CatalogLoader.CustomCatalogLoader.html index 9dcca0e89..5d23ed3c2 100644 --- a/javadoc/org/apache/iceberg/flink/CatalogLoader.CustomCatalogLoader.html +++ b/javadoc/org/apache/iceberg/flink/CatalogLoader.CustomCatalogLoader.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10}; +var methods = {"i0":10,"i1":10,"i2":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -107,7 +107,7 @@

      Class CatalogL
    • All Implemented Interfaces:
      -
      java.io.Serializable, CatalogLoader
      +
      java.io.Serializable, java.lang.Cloneable, CatalogLoader
      Enclosing interface:
      @@ -156,12 +156,18 @@

      Method Summary

      Method and Description +CatalogLoader +clone() +
      Clone a CatalogLoader.
      + + + Catalog loadCatalog()
      Create a new catalog with the provided properties.
      - + java.lang.String toString()  @@ -171,7 +177,7 @@

      Method Summary

      Methods inherited from class java.lang.Object

      -clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
    • +equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

    + + + +
      +
    • +

      clone

      +
      public CatalogLoader clone()
      +
      Description copied from interface: CatalogLoader
      +
      Clone a CatalogLoader.
      +
      +
      Specified by:
      +
      clone in interface CatalogLoader
      +
      Overrides:
      +
      clone in class java.lang.Object
      +
      +
    • +
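A usage sketch built on the static hive(...) factory and loadCatalog() documented on CatalogLoader; hadoopConf and props are assumed to be defined by the caller:

    CatalogLoader loader = CatalogLoader.hive("iceberg", hadoopConf, props);
    Catalog catalog = loader.clone().loadCatalog();   // clone() yields an independent loader to open a catalog from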
    diff --git a/javadoc/org/apache/iceberg/flink/CatalogLoader.HadoopCatalogLoader.html b/javadoc/org/apache/iceberg/flink/CatalogLoader.HadoopCatalogLoader.html index 21cbe923f..f18bdcd60 100644 --- a/javadoc/org/apache/iceberg/flink/CatalogLoader.HadoopCatalogLoader.html +++ b/javadoc/org/apache/iceberg/flink/CatalogLoader.HadoopCatalogLoader.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10}; +var methods = {"i0":10,"i1":10,"i2":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -107,7 +107,7 @@

    Class CatalogL
  • All Implemented Interfaces:
    -
    java.io.Serializable, CatalogLoader
    +
    java.io.Serializable, java.lang.Cloneable, CatalogLoader
    Enclosing interface:
    @@ -156,12 +156,18 @@

    Method Summary

    Method and Description +CatalogLoader +clone() +
    Clone a CatalogLoader.
    + + + Catalog loadCatalog()
    Create a new catalog with the provided properties.
    - + java.lang.String toString()  @@ -171,7 +177,7 @@

    Method Summary

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
  • +equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait + + + +
      +
    • +

      clone

      +
      public CatalogLoader clone()
      +
      Description copied from interface: CatalogLoader
      +
      Clone a CatalogLoader.
      +
      +
      Specified by:
      +
      clone in interface CatalogLoader
      +
      Overrides:
      +
      clone in class java.lang.Object
      +
      +
    • +
    diff --git a/javadoc/org/apache/iceberg/flink/CatalogLoader.HiveCatalogLoader.html b/javadoc/org/apache/iceberg/flink/CatalogLoader.HiveCatalogLoader.html index 88b0eb435..2b9dbc99f 100644 --- a/javadoc/org/apache/iceberg/flink/CatalogLoader.HiveCatalogLoader.html +++ b/javadoc/org/apache/iceberg/flink/CatalogLoader.HiveCatalogLoader.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10}; +var methods = {"i0":10,"i1":10,"i2":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -107,7 +107,7 @@

    Class CatalogLoa
  • All Implemented Interfaces:
    -
    java.io.Serializable, CatalogLoader
    +
    java.io.Serializable, java.lang.Cloneable, CatalogLoader
    Enclosing interface:
    @@ -156,12 +156,18 @@

    Method Summary

    Method and Description +CatalogLoader +clone() +
    Clone a CatalogLoader.
    + + + Catalog loadCatalog()
    Create a new catalog with the provided properties.
    - + java.lang.String toString()  @@ -171,7 +177,7 @@

    Method Summary

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
  • +equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait + + + +
      +
    • +

      clone

      +
      public CatalogLoader clone()
      +
      Description copied from interface: CatalogLoader
      +
      Clone a CatalogLoader.
      +
      +
      Specified by:
      +
      clone in interface CatalogLoader
      +
      Overrides:
      +
      clone in class java.lang.Object
      +
      +
    • +
    diff --git a/javadoc/org/apache/iceberg/flink/CatalogLoader.RESTCatalogLoader.html b/javadoc/org/apache/iceberg/flink/CatalogLoader.RESTCatalogLoader.html index 5eb0735dd..54f1fffda 100644 --- a/javadoc/org/apache/iceberg/flink/CatalogLoader.RESTCatalogLoader.html +++ b/javadoc/org/apache/iceberg/flink/CatalogLoader.RESTCatalogLoader.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10}; +var methods = {"i0":10,"i1":10,"i2":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -107,7 +107,7 @@

    Class CatalogLoa
  • All Implemented Interfaces:
    -
    java.io.Serializable, CatalogLoader
    +
    java.io.Serializable, java.lang.Cloneable, CatalogLoader
    Enclosing interface:
    @@ -156,12 +156,18 @@

    Method Summary

    Method and Description +CatalogLoader +clone() +
    Clone a CatalogLoader.
    + + + Catalog loadCatalog()
    Create a new catalog with the provided properties.
    - + java.lang.String toString()  @@ -171,7 +177,7 @@

    Method Summary

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
  • +equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait + + + +
      +
    • +

      clone

      +
      public CatalogLoader clone()
      +
      Description copied from interface: CatalogLoader
      +
      Clone a CatalogLoader.
      +
      +
      Specified by:
      +
      clone in interface CatalogLoader
      +
      Overrides:
      +
      clone in class java.lang.Object
      +
      +
    • +
    diff --git a/javadoc/org/apache/iceberg/flink/CatalogLoader.html b/javadoc/org/apache/iceberg/flink/CatalogLoader.html index a545f4680..71cbb71af 100644 --- a/javadoc/org/apache/iceberg/flink/CatalogLoader.html +++ b/javadoc/org/apache/iceberg/flink/CatalogLoader.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":17,"i1":17,"i2":17,"i3":6,"i4":17}; +var methods = {"i0":6,"i1":17,"i2":17,"i3":17,"i4":6,"i5":17}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -99,7 +99,7 @@

    Interface CatalogLoader

  • All Superinterfaces:
    -
    java.io.Serializable
    +
    java.lang.Cloneable, java.io.Serializable
    All Known Implementing Classes:
    @@ -108,7 +108,7 @@

    Interface CatalogLoader



    public interface CatalogLoader
    -extends java.io.Serializable
    +extends java.io.Serializable, java.lang.Cloneable
    Serializable loader to load an Iceberg Catalog.
  • @@ -160,31 +160,37 @@

    Method Summary

    Method and Description +CatalogLoader +clone() +
    Clone a CatalogLoader.
    + + + static CatalogLoader custom(java.lang.String name, java.util.Map<java.lang.String,java.lang.String> properties, org.apache.hadoop.conf.Configuration hadoopConf, java.lang.String impl)  - + static CatalogLoader hadoop(java.lang.String name, org.apache.hadoop.conf.Configuration hadoopConf, java.util.Map<java.lang.String,java.lang.String> properties)  - + static CatalogLoader hive(java.lang.String name, org.apache.hadoop.conf.Configuration hadoopConf, java.util.Map<java.lang.String,java.lang.String> properties)  - + Catalog loadCatalog()
    Create a new catalog with the provided properties.
    - + static CatalogLoader rest(java.lang.String name, org.apache.hadoop.conf.Configuration hadoopConf, @@ -222,6 +228,16 @@

    loadCatalog

    + + + + diff --git a/javadoc/org/apache/iceberg/flink/FlinkCatalog.html b/javadoc/org/apache/iceberg/flink/FlinkCatalog.html index e0303efd0..a5c5157db 100644 --- a/javadoc/org/apache/iceberg/flink/FlinkCatalog.html +++ b/javadoc/org/apache/iceberg/flink/FlinkCatalog.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -204,168 +204,177 @@

    Method Summary

    void alterTable(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogBaseTable newTable, - boolean ignoreIfNotExists)  + boolean ignoreIfNotExists)
    +
    This alterTable API only supports altering table properties.
    + void +alterTable(org.apache.flink.table.catalog.ObjectPath tablePath, + org.apache.flink.table.catalog.CatalogBaseTable newTable, + java.util.List<org.apache.flink.table.catalog.TableChange> tableChanges, + boolean ignoreIfNotExists)  + + +void alterTableColumnStatistics(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.stats.CatalogColumnStatistics columnStatistics, boolean ignoreIfNotExists)  - + void alterTableStatistics(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.stats.CatalogTableStatistics tableStatistics, boolean ignoreIfNotExists)  - + Catalog catalog()  - + void close()  - + void createDatabase(java.lang.String name, org.apache.flink.table.catalog.CatalogDatabase database, boolean ignoreIfExists)  - + void createFunction(org.apache.flink.table.catalog.ObjectPath functionPath, org.apache.flink.table.catalog.CatalogFunction function, boolean ignoreIfExists)  - + void createPartition(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogPartitionSpec partitionSpec, org.apache.flink.table.catalog.CatalogPartition partition, boolean ignoreIfExists)  - + void createTable(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogBaseTable table, boolean ignoreIfExists)  - + boolean databaseExists(java.lang.String databaseName)  - + void dropDatabase(java.lang.String name, boolean ignoreIfNotExists, boolean cascade)  - + void dropFunction(org.apache.flink.table.catalog.ObjectPath functionPath, boolean ignoreIfNotExists)  - + void dropPartition(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogPartitionSpec partitionSpec, boolean ignoreIfNotExists)  - + void dropTable(org.apache.flink.table.catalog.ObjectPath tablePath, boolean ignoreIfNotExists)  - + boolean functionExists(org.apache.flink.table.catalog.ObjectPath functionPath)  - + org.apache.flink.table.catalog.CatalogDatabase getDatabase(java.lang.String databaseName)  - + java.util.Optional<org.apache.flink.table.factories.Factory> getFactory()  - + org.apache.flink.table.catalog.CatalogFunction getFunction(org.apache.flink.table.catalog.ObjectPath functionPath)  - + org.apache.flink.table.catalog.CatalogPartition getPartition(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogPartitionSpec partitionSpec)  - + org.apache.flink.table.catalog.stats.CatalogColumnStatistics getPartitionColumnStatistics(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogPartitionSpec partitionSpec)  - + org.apache.flink.table.catalog.stats.CatalogTableStatistics getPartitionStatistics(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogPartitionSpec partitionSpec)  - + org.apache.flink.table.catalog.CatalogTable getTable(org.apache.flink.table.catalog.ObjectPath tablePath)  - + org.apache.flink.table.catalog.stats.CatalogColumnStatistics getTableColumnStatistics(org.apache.flink.table.catalog.ObjectPath tablePath)  - + org.apache.flink.table.catalog.stats.CatalogTableStatistics getTableStatistics(org.apache.flink.table.catalog.ObjectPath tablePath)  - + java.util.List<java.lang.String> listDatabases()  - + java.util.List<java.lang.String> listFunctions(java.lang.String dbName)  - + java.util.List<org.apache.flink.table.catalog.CatalogPartitionSpec> listPartitions(org.apache.flink.table.catalog.ObjectPath tablePath)  - + 
java.util.List<org.apache.flink.table.catalog.CatalogPartitionSpec> listPartitions(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogPartitionSpec partitionSpec)  - + java.util.List<org.apache.flink.table.catalog.CatalogPartitionSpec> listPartitionsByFilter(org.apache.flink.table.catalog.ObjectPath tablePath, java.util.List<org.apache.flink.table.expressions.Expression> filters)  - + java.util.List<java.lang.String> listTables(java.lang.String databaseName)  - + java.util.List<java.lang.String> listViews(java.lang.String databaseName)  - + void open()  - + boolean partitionExists(org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogPartitionSpec partitionSpec)  - + void renameTable(org.apache.flink.table.catalog.ObjectPath tablePath, java.lang.String newTableName, boolean ignoreIfNotExists)  - + boolean tableExists(org.apache.flink.table.catalog.ObjectPath tablePath)  @@ -676,10 +685,40 @@

    alterTable

    boolean ignoreIfNotExists) throws org.apache.flink.table.catalog.exceptions.CatalogException, org.apache.flink.table.catalog.exceptions.TableNotExistException +
    This alterTable API only supports altering table properties. + +

Adding, removing, or renaming columns cannot be supported by comparing CatalogTable instances, unless the Flink schema contains the Iceberg column IDs.

To alter columns, use the other alterTable API and provide a list of TableChanges.

    +
    +
    Parameters:
    +
    tablePath - path of the table or view to be modified
    +
    newTable - the new table definition
    +
    ignoreIfNotExists - flag to specify behavior when the table or view does not exist: if set + to false, throw an exception, if set to true, do nothing.
    +
    Throws:
    +
    org.apache.flink.table.catalog.exceptions.CatalogException - in case of any runtime exception
    +
    org.apache.flink.table.catalog.exceptions.TableNotExistException - if the table does not exist
    +
    + + + + + +
      +
    • +

      alterTable

      +
      public void alterTable(org.apache.flink.table.catalog.ObjectPath tablePath,
      +                       org.apache.flink.table.catalog.CatalogBaseTable newTable,
      +                       java.util.List<org.apache.flink.table.catalog.TableChange> tableChanges,
      +                       boolean ignoreIfNotExists)
      +                throws org.apache.flink.table.catalog.exceptions.TableNotExistException,
      +                       org.apache.flink.table.catalog.exceptions.CatalogException
      Throws:
      -
      org.apache.flink.table.catalog.exceptions.CatalogException
      org.apache.flink.table.catalog.exceptions.TableNotExistException
      +
      org.apache.flink.table.catalog.exceptions.CatalogException
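A hedged sketch of this overload; TableChange.set is assumed to be available in the Flink version that provides List<TableChange>, and newTable is assumed to be an existing CatalogBaseTable definition:

    ObjectPath path = new ObjectPath("db", "tbl");
    List<TableChange> changes = Collections.singletonList(
        TableChange.set("write.format.default", "orc"));   // assumed Flink TableChange factory method
    catalog.alterTable(path, newTable, changes, false);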
    diff --git a/javadoc/org/apache/iceberg/flink/FlinkReadConf.html b/javadoc/org/apache/iceberg/flink/FlinkReadConf.html index 12f0a7d41..90f85a4ea 100644 --- a/javadoc/org/apache/iceberg/flink/FlinkReadConf.html +++ b/javadoc/org/apache/iceberg/flink/FlinkReadConf.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -176,57 +176,61 @@

    Method Summary

    int -maxPlanningSnapshotCount()  +maxAllowedPlanningFailures()  +int +maxPlanningSnapshotCount()  + + java.time.Duration monitorInterval()  - + java.lang.String nameMapping()  - + java.lang.Long snapshotId()  - + long splitFileOpenCost()  - + int splitLookback()  - + long splitSize()  - + StreamingStartingStrategy startingStrategy()  - + java.lang.Long startSnapshotId()  - + java.lang.Long startSnapshotTimestamp()  - + java.lang.String startTag()  - + boolean streaming()  - + java.lang.String tag()  - + int workerPoolSize()  @@ -454,12 +458,21 @@

    limit

    -
      +
      • workerPoolSize

        public int workerPoolSize()
      + + + +
        +
      • +

        maxAllowedPlanningFailures

        +
        public int maxAllowedPlanningFailures()
        +
      • +
    diff --git a/javadoc/org/apache/iceberg/flink/FlinkReadOptions.html b/javadoc/org/apache/iceberg/flink/FlinkReadOptions.html index 07be9e2cc..577df58ab 100644 --- a/javadoc/org/apache/iceberg/flink/FlinkReadOptions.html +++ b/javadoc/org/apache/iceberg/flink/FlinkReadOptions.html @@ -164,6 +164,14 @@

    Field Summary

    static java.lang.String +MAX_ALLOWED_PLANNING_FAILURES  + + +static org.apache.flink.configuration.ConfigOption<java.lang.Integer> +MAX_ALLOWED_PLANNING_FAILURES_OPTION  + + +static java.lang.String MAX_PLANNING_SNAPSHOT_COUNT  @@ -563,12 +571,34 @@

    LIMIT

    -
      +
      • LIMIT_OPTION

        public static final org.apache.flink.configuration.ConfigOption<java.lang.Long> LIMIT_OPTION
      + + + +
        +
      • +

        MAX_ALLOWED_PLANNING_FAILURES

        +
        public static final java.lang.String MAX_ALLOWED_PLANNING_FAILURES
        +
        +
        See Also:
        +
        Constant Field Values
        +
        +
      • +
      + + + +
        +
      • +

        MAX_ALLOWED_PLANNING_FAILURES_OPTION

        +
        public static final org.apache.flink.configuration.ConfigOption<java.lang.Integer> MAX_ALLOWED_PLANNING_FAILURES_OPTION
        +
      • +
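A short sketch of setting this option on a Flink Configuration; the threshold value 3 is arbitrary:

    Configuration readConf = new Configuration();   // org.apache.flink.configuration.Configuration
    readConf.set(FlinkReadOptions.MAX_ALLOWED_PLANNING_FAILURES_OPTION, 3);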
    diff --git a/javadoc/org/apache/iceberg/flink/FlinkSchemaUtil.html b/javadoc/org/apache/iceberg/flink/FlinkSchemaUtil.html index 9145d9e09..0479b7085 100644 --- a/javadoc/org/apache/iceberg/flink/FlinkSchemaUtil.html +++ b/javadoc/org/apache/iceberg/flink/FlinkSchemaUtil.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9}; +var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -48,7 +48,7 @@ diff --git a/javadoc/org/apache/iceberg/hive/HiveCatalog.html b/javadoc/org/apache/iceberg/hive/HiveCatalog.html index 108eac823..e7ff29d1f 100644 --- a/javadoc/org/apache/iceberg/hive/HiveCatalog.html +++ b/javadoc/org/apache/iceberg/hive/HiveCatalog.html @@ -253,7 +253,7 @@

    Method Summary

    java.util.List<Namespace> listNamespaces(Namespace namespace) -
    List namespaces from the namespace.
    +
    List child namespaces from the namespace.
    @@ -572,15 +572,34 @@

    createNamespace

    listNamespaces

    public java.util.List<Namespace> listNamespaces(Namespace namespace)
    Description copied from interface: SupportsNamespaces
    -
    List namespaces from the namespace. +
    List child namespaces from the namespace. -

    For example, if table a.b.t exists, use 'SELECT NAMESPACE IN a' this method must return - Namepace.of("a","b") Namespace.

    +

    For two existing tables named 'a.b.c.table' and 'a.b.d.table', this method returns: + +

      +
    • Given: Namespace.empty() +
    • Returns: Namespace.of("a") +
    + +
      +
    • Given: Namespace.of("a") +
    • Returns: Namespace.of("a", "b") +
    + +
      +
    • Given: Namespace.of("a", "b") +
    • Returns: Namespace.of("a", "b", "c") and Namespace.of("a", "b", "d") +
    + +
      +
    • Given: Namespace.of("a", "b", "c") +
    • Returns: empty list, because there are no child namespaces +
    Specified by:
    listNamespaces in interface SupportsNamespaces
    Returns:
    -
    a List of namespace Namespace names
    +
    a List of child Namespace names from the given namespace
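A sketch mirroring the example layout above, assuming tables a.b.c.table and a.b.d.table exist:

    List<Namespace> children = catalog.listNamespaces(Namespace.of("a", "b"));
    // children: [Namespace.of("a", "b", "c"), Namespace.of("a", "b", "d")]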
    diff --git a/javadoc/org/apache/iceberg/hive/HiveClientPool.html b/javadoc/org/apache/iceberg/hive/HiveClientPool.html index 04226a296..7368924ab 100644 --- a/javadoc/org/apache/iceberg/hive/HiveClientPool.html +++ b/javadoc/org/apache/iceberg/hive/HiveClientPool.html @@ -191,7 +191,7 @@

    Method Summary

    Methods inherited from class org.apache.iceberg.ClientPoolImpl

    -close, poolSize, run, run +close, isClosed, poolSize, run, run diff --git a/javadoc/org/apache/iceberg/hive/MetastoreUtil.html b/javadoc/org/apache/iceberg/hive/MetastoreUtil.html index 160f72cf5..ba8eb4e0b 100644 --- a/javadoc/org/apache/iceberg/hive/MetastoreUtil.html +++ b/javadoc/org/apache/iceberg/hive/MetastoreUtil.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":9}; +var methods = {"i0":9,"i1":9}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -48,7 +48,7 @@ +
    + +
    + + + + + + + + diff --git a/javadoc/org/apache/iceberg/io/FileAppender.html b/javadoc/org/apache/iceberg/io/FileAppender.html index 74081dd25..1b75e0f22 100644 --- a/javadoc/org/apache/iceberg/io/FileAppender.html +++ b/javadoc/org/apache/iceberg/io/FileAppender.html @@ -47,7 +47,7 @@ - + OutputFile newOutputFile(java.lang.String location)
    Get a OutputFile instance to write bytes to the file at the given path.
    - + java.util.Map<java.lang.String,java.lang.String> properties()
    Returns the property map used to configure this FileIO
    - + void serializeConfWith(java.util.function.Function<org.apache.hadoop.conf.Configuration,SerializableSupplier<org.apache.hadoop.conf.Configuration>> confSerializer)
    Take a function that serializes Hadoop configuration into a supplier.
    - + void setConf(org.apache.hadoop.conf.Configuration conf)  @@ -224,7 +248,7 @@

    Method Summary

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait +clone, equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait + + + + @@ -432,12 +476,70 @@

    getConf

    -
      +
      • ioClass

        public java.lang.Class<?> ioClass(java.lang.String location)
      + + + +
        +
      • +

        finalize

        +
        protected void finalize()
        +                 throws java.lang.Throwable
        +
        +
        Overrides:
        +
        finalize in class java.lang.Object
        +
        Throws:
        +
        java.lang.Throwable
        +
        +
      • +
      + + + +
        +
      • +

        listPrefix

        +
        public java.lang.Iterable<FileInfo> listPrefix(java.lang.String prefix)
        +
        Description copied from interface: SupportsPrefixOperations
        +
        Return an iterable of all files under a prefix. + +

        Hierarchical file systems (e.g. HDFS) may impose additional restrictions, such as requiring the prefix to fully match a directory, whereas key/value object stores may allow arbitrary prefixes.

        +
        +
        Specified by:
        +
        listPrefix in interface SupportsPrefixOperations
        +
        Parameters:
        +
        prefix - prefix to list
        +
        Returns:
        +
        iterable of file information
        +
        +
      • +
      + + + +
        +
      • +

        deletePrefix

        +
        public void deletePrefix(java.lang.String prefix)
        +
        Description copied from interface: SupportsPrefixOperations
        +
        Delete all files under a prefix. + +

        Hierarchical file systems (e.g. HDFS) may impose additional restrictions, such as requiring the prefix to fully match a directory, whereas key/value object stores may allow arbitrary prefixes.

        +
        +
        Specified by:
        +
        deletePrefix in interface SupportsPrefixOperations
        +
        Parameters:
        +
        prefix - prefix to delete
        +
        +
      • +
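Since listPrefix and deletePrefix above are described only abstractly, here is a hedged sketch of how a caller might combine them. The FileInfo accessors used (location, size) are assumptions based on the class summary, and the FileIO implementation is supplied by the caller (for example HadoopFileIO, S3FileIO, GCSFileIO, or ADLSFileIO, all listed as implementing classes below).

    import org.apache.iceberg.io.FileInfo;
    import org.apache.iceberg.io.SupportsPrefixOperations;

    public class PrefixOperationsSketch {
      // Lists every file under the prefix, then removes the whole prefix.
      // Note the caveat above: hierarchical file systems may require the
      // prefix to match a directory exactly.
      static void listThenDelete(SupportsPrefixOperations io, String prefix) {
        for (FileInfo file : io.listPrefix(prefix)) {
          System.out.println(file.location() + " (" + file.size() + " bytes)");
        }
        io.deletePrefix(prefix);
      }
    }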
    diff --git a/javadoc/org/apache/iceberg/io/SeekableInputStream.html b/javadoc/org/apache/iceberg/io/SeekableInputStream.html index a7e3a1714..3cd43c4f6 100644 --- a/javadoc/org/apache/iceberg/io/SeekableInputStream.html +++ b/javadoc/org/apache/iceberg/io/SeekableInputStream.html @@ -116,7 +116,7 @@

    Class SeekableInputStream
    Direct Known Subclasses:
    -
    ByteBufferInputStream
    +
    AesGcmInputStream, ByteBufferInputStream


    diff --git a/javadoc/org/apache/iceberg/io/SupportsBulkOperations.html b/javadoc/org/apache/iceberg/io/SupportsBulkOperations.html index 5c3f2fbea..8a57bb6e7 100644 --- a/javadoc/org/apache/iceberg/io/SupportsBulkOperations.html +++ b/javadoc/org/apache/iceberg/io/SupportsBulkOperations.html @@ -102,8 +102,12 @@

    Interface SupportsBul
    java.lang.AutoCloseable, java.io.Closeable, FileIO, java.io.Serializable
    +
    All Known Subinterfaces:
    +
    DelegateFileIO
    +
    +
    All Known Implementing Classes:
    -
    HadoopFileIO, S3FileIO
    +
    ADLSFileIO, GCSFileIO, HadoopFileIO, ResolvingFileIO, S3FileIO


    diff --git a/javadoc/org/apache/iceberg/io/SupportsPrefixOperations.html b/javadoc/org/apache/iceberg/io/SupportsPrefixOperations.html index 7d3b3c3d5..4b197934d 100644 --- a/javadoc/org/apache/iceberg/io/SupportsPrefixOperations.html +++ b/javadoc/org/apache/iceberg/io/SupportsPrefixOperations.html @@ -102,8 +102,12 @@

    Interface SupportsP
    java.lang.AutoCloseable, java.io.Closeable, FileIO, java.io.Serializable
    +
    All Known Subinterfaces:
    +
    DelegateFileIO
    +
    +
    All Known Implementing Classes:
    -
    HadoopFileIO, S3FileIO
    +
    ADLSFileIO, GCSFileIO, HadoopFileIO, ResolvingFileIO, S3FileIO


    diff --git a/javadoc/org/apache/iceberg/io/package-frame.html b/javadoc/org/apache/iceberg/io/package-frame.html index f06a0bc6b..cc877100d 100644 --- a/javadoc/org/apache/iceberg/io/package-frame.html +++ b/javadoc/org/apache/iceberg/io/package-frame.html @@ -15,6 +15,7 @@

    Interfaces

  • CloseableIterable
  • CloseableIterator
  • CredentialSupplier
  • +
  • DelegateFileIO
  • DelegatingInputStream
  • DelegatingOutputStream
  • EqualityDeltaWriter
  • @@ -51,6 +52,7 @@

    Classes

  • DeleteSchemaUtil
  • DeleteWriteResult
  • FanoutDataWriter
  • +
  • FanoutPositionOnlyDeleteWriter
  • FileInfo
  • FileIOParser
  • FilterIterator
  • diff --git a/javadoc/org/apache/iceberg/io/package-summary.html b/javadoc/org/apache/iceberg/io/package-summary.html index 2c17490a6..e4d979782 100644 --- a/javadoc/org/apache/iceberg/io/package-summary.html +++ b/javadoc/org/apache/iceberg/io/package-summary.html @@ -41,7 +41,7 @@ @@ -123,6 +129,10 @@

    Field Summary

    static java.lang.String +
    CLIENT_API_VERSION  + + +static java.lang.String NESSIE_CONFIG_PREFIX  @@ -134,6 +144,21 @@

    Field Summary

    Method Summary

    + + + + + + + + + + +
    All Methods Static Methods Concrete Methods 
    Modifier and TypeMethod and Description
    static TableMetadataupdateTableMetadataWithNessieSpecificProperties(TableMetadata tableMetadata, + java.lang.String metadataLocation, + org.projectnessie.model.IcebergTable table, + java.lang.String identifier, + org.projectnessie.model.Reference reference) 
    • @@ -158,7 +183,7 @@

      Field Detail

      -
        +
        • NESSIE_CONFIG_PREFIX

          public static final java.lang.String NESSIE_CONFIG_PREFIX
          @@ -168,6 +193,40 @@

          NESSIE_CONFIG_PREFIX

        + + + +
          +
        • +

          CLIENT_API_VERSION

          +
          public static final java.lang.String CLIENT_API_VERSION
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          updateTableMetadataWithNessieSpecificProperties

          +
          public static TableMetadata updateTableMetadataWithNessieSpecificProperties(TableMetadata tableMetadata,
          +                                                                            java.lang.String metadataLocation,
          +                                                                            org.projectnessie.model.IcebergTable table,
          +                                                                            java.lang.String identifier,
          +                                                                            org.projectnessie.model.Reference reference)
          +
        • +
    • @@ -223,13 +282,13 @@

      NESSIE_CONFIG_PREFIX

    • Nested | 
    • Field | 
    • Constr | 
    • -
    • Method
    • +
    • Method
    diff --git a/javadoc/org/apache/iceberg/nessie/package-tree.html b/javadoc/org/apache/iceberg/nessie/package-tree.html index 70cd4973a..ebf1fbfa6 100644 --- a/javadoc/org/apache/iceberg/nessie/package-tree.html +++ b/javadoc/org/apache/iceberg/nessie/package-tree.html @@ -81,7 +81,7 @@

    Class Hierarchy

    @@ -118,6 +119,7 @@

    Classes

  • FileMetadata
  • FileMetadata.Builder
  • Files
  • +
  • FileScanTaskParser
  • FilesTable
  • FilesTable.FilesTableScan
  • FindFiles
  • @@ -145,12 +147,14 @@

    Classes

  • MetadataUpdate.AddSchema
  • MetadataUpdate.AddSnapshot
  • MetadataUpdate.AddSortOrder
  • +
  • MetadataUpdate.AddViewVersion
  • MetadataUpdate.AssignUUID
  • MetadataUpdate.RemoveProperties
  • MetadataUpdate.RemoveSnapshot
  • MetadataUpdate.RemoveSnapshotRef
  • MetadataUpdate.RemoveStatistics
  • MetadataUpdate.SetCurrentSchema
  • +
  • MetadataUpdate.SetCurrentViewVersion
  • MetadataUpdate.SetDefaultPartitionSpec
  • MetadataUpdate.SetDefaultSortOrder
  • MetadataUpdate.SetLocation
  • @@ -185,6 +189,7 @@

    Classes

  • PositionDeletesTable.PositionDeletesBatchScan
  • ReachableFileUtil
  • RefsTable
  • +
  • RollingManifestWriter
  • ScanSummary
  • ScanSummary.Builder
  • ScanSummary.PartitionMetrics
  • @@ -209,9 +214,12 @@

    Classes

  • SortOrder
  • SortOrder.Builder
  • SortOrderParser
  • +
  • SparkDistributedDataScan
  • StaticTableOperations
  • StatisticsFileParser
  • StreamingDelete
  • +
  • SystemConfigs
  • +
  • SystemConfigs.ConfigEntry
  • SystemProperties
  • TableMetadata
  • TableMetadata.Builder
  • @@ -222,6 +230,16 @@

    Classes

  • Transactions
  • UnboundPartitionSpec
  • UnboundSortOrder
  • +
  • UpdateRequirement.AssertCurrentSchemaID
  • +
  • UpdateRequirement.AssertDefaultSortOrderID
  • +
  • UpdateRequirement.AssertDefaultSpecID
  • +
  • UpdateRequirement.AssertLastAssignedFieldId
  • +
  • UpdateRequirement.AssertLastAssignedPartitionId
  • +
  • UpdateRequirement.AssertRefSnapshotID
  • +
  • UpdateRequirement.AssertTableDoesNotExist
  • +
  • UpdateRequirement.AssertTableUUID
  • +
  • UpdateRequirementParser
  • +
  • UpdateRequirements
  • Enums

  • org.apache.iceberg.SnapshotSummary
  • @@ -234,6 +239,8 @@

    Class Hierarchy

  • org.apache.iceberg.StaticTableOperations (implements org.apache.iceberg.TableOperations)
  • org.apache.iceberg.StatisticsFileParser
  • org.apache.iceberg.StreamingDelete (implements org.apache.iceberg.DeleteFiles)
  • +
  • org.apache.iceberg.SystemConfigs
  • +
  • org.apache.iceberg.SystemConfigs.ConfigEntry<T>
  • org.apache.iceberg.SystemProperties
  • org.apache.iceberg.TableMetadata (implements java.io.Serializable)
  • org.apache.iceberg.TableMetadata.Builder
  • @@ -244,6 +251,16 @@

    Class Hierarchy

  • org.apache.iceberg.Transactions
  • org.apache.iceberg.UnboundPartitionSpec
  • org.apache.iceberg.UnboundSortOrder
  • +
  • org.apache.iceberg.UpdateRequirement.AssertCurrentSchemaID (implements org.apache.iceberg.UpdateRequirement)
  • +
  • org.apache.iceberg.UpdateRequirement.AssertDefaultSortOrderID (implements org.apache.iceberg.UpdateRequirement)
  • +
  • org.apache.iceberg.UpdateRequirement.AssertDefaultSpecID (implements org.apache.iceberg.UpdateRequirement)
  • +
  • org.apache.iceberg.UpdateRequirement.AssertLastAssignedFieldId (implements org.apache.iceberg.UpdateRequirement)
  • +
  • org.apache.iceberg.UpdateRequirement.AssertLastAssignedPartitionId (implements org.apache.iceberg.UpdateRequirement)
  • +
  • org.apache.iceberg.UpdateRequirement.AssertRefSnapshotID (implements org.apache.iceberg.UpdateRequirement)
  • +
  • org.apache.iceberg.UpdateRequirement.AssertTableDoesNotExist (implements org.apache.iceberg.UpdateRequirement)
  • +
  • org.apache.iceberg.UpdateRequirement.AssertTableUUID (implements org.apache.iceberg.UpdateRequirement)
  • +
  • org.apache.iceberg.UpdateRequirementParser
  • +
  • org.apache.iceberg.UpdateRequirements
  • @@ -379,6 +396,7 @@

    Interface Hierarchy

  • org.apache.iceberg.TableOperations
  • org.apache.iceberg.Tables
  • org.apache.iceberg.Transaction
  • +
  • org.apache.iceberg.UpdateRequirement
  • Enum Hierarchy

    + + + +
      +
    • +

      withFileEncryptionKey

      +
      public Parquet.DataWriteBuilder withFileEncryptionKey(java.nio.ByteBuffer fileEncryptionKey)
      +
    • +
    + + + + diff --git a/javadoc/org/apache/iceberg/parquet/Parquet.DeleteWriteBuilder.html b/javadoc/org/apache/iceberg/parquet/Parquet.DeleteWriteBuilder.html index 407c90fbc..60f8668a7 100644 --- a/javadoc/org/apache/iceberg/parquet/Parquet.DeleteWriteBuilder.html +++ b/javadoc/org/apache/iceberg/parquet/Parquet.DeleteWriteBuilder.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -191,18 +191,26 @@

    Method Summary

    Parquet.DeleteWriteBuilder -withKeyMetadata(EncryptionKeyMetadata metadata)  +withAADPrefix(java.nio.ByteBuffer aadPrefix)  Parquet.DeleteWriteBuilder -withPartition(StructLike key)  +withFileEncryptionKey(java.nio.ByteBuffer fileEncryptionKey)  Parquet.DeleteWriteBuilder -withSortOrder(SortOrder newSortOrder)  +withKeyMetadata(EncryptionKeyMetadata metadata)  Parquet.DeleteWriteBuilder +withPartition(StructLike key)  + + +Parquet.DeleteWriteBuilder +withSortOrder(SortOrder newSortOrder)  + + +Parquet.DeleteWriteBuilder withSpec(PartitionSpec newSpec)  @@ -337,6 +345,24 @@

    withKeyMetadata

    public Parquet.DeleteWriteBuilder withKeyMetadata(EncryptionKeyMetadata metadata)
    + + + + + + + + diff --git a/javadoc/org/apache/iceberg/parquet/Parquet.ReadBuilder.html b/javadoc/org/apache/iceberg/parquet/Parquet.ReadBuilder.html index 1cb54e16e..cfe5714d7 100644 --- a/javadoc/org/apache/iceberg/parquet/Parquet.ReadBuilder.html +++ b/javadoc/org/apache/iceberg/parquet/Parquet.ReadBuilder.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -193,6 +193,14 @@

    Method Summary

    Parquet.ReadBuilder +withAADPrefix(java.nio.ByteBuffer aadPrefix)  + + +Parquet.ReadBuilder +withFileEncryptionKey(java.nio.ByteBuffer encryptionKey)  + + +Parquet.ReadBuilder withNameMapping(NameMapping newNameMapping)  @@ -353,6 +361,24 @@

    withNameMapping

    public Parquet.ReadBuilder withNameMapping(NameMapping newNameMapping)
    + + + +
      +
    • +

      withFileEncryptionKey

      +
      public Parquet.ReadBuilder withFileEncryptionKey(java.nio.ByteBuffer encryptionKey)
      +
    • +
    + + + +
      +
    • +

      withAADPrefix

      +
      public Parquet.ReadBuilder withAADPrefix(java.nio.ByteBuffer aadPrefix)
      +
    • +
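The two ReadBuilder methods added above slot into the usual Parquet read chain. A rough sketch, assuming generic-record reads via GenericParquetReaders and that the key and AAD prefix bytes are obtained elsewhere; only withFileEncryptionKey and withAADPrefix come from this change, the rest is the pre-existing builder API.

    import java.nio.ByteBuffer;
    import org.apache.iceberg.Schema;
    import org.apache.iceberg.data.Record;
    import org.apache.iceberg.data.parquet.GenericParquetReaders;
    import org.apache.iceberg.io.CloseableIterable;
    import org.apache.iceberg.io.InputFile;
    import org.apache.iceberg.parquet.Parquet;

    public class EncryptedParquetReadSketch {
      // Opens an AES/GCM-encrypted Parquet data file as generic Records.
      static CloseableIterable<Record> open(
          InputFile file, Schema schema, ByteBuffer key, ByteBuffer aadPrefix) {
        return Parquet.read(file)
            .project(schema)
            .createReaderFunc(fileSchema -> GenericParquetReaders.buildReader(schema, fileSchema))
            .withFileEncryptionKey(key)   // added in this change
            .withAADPrefix(aadPrefix)     // added in this change
            .build();
      }
    }

The WriteBuilder, DataWriteBuilder, and DeleteWriteBuilder additions shown elsewhere in this diff configure the write side the same way.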
    diff --git a/javadoc/org/apache/iceberg/parquet/Parquet.WriteBuilder.html b/javadoc/org/apache/iceberg/parquet/Parquet.WriteBuilder.html index fdf3042ee..abc5cbce1 100644 --- a/javadoc/org/apache/iceberg/parquet/Parquet.WriteBuilder.html +++ b/javadoc/org/apache/iceberg/parquet/Parquet.WriteBuilder.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -179,10 +179,18 @@

    Method Summary

    Parquet.WriteBuilder -writerVersion(org.apache.parquet.column.ParquetProperties.WriterVersion version)  +withAADPrefix(java.nio.ByteBuffer aadPrefix)  Parquet.WriteBuilder +withFileEncryptionKey(java.nio.ByteBuffer encryptionKey)  + + +Parquet.WriteBuilder +writerVersion(org.apache.parquet.column.ParquetProperties.WriterVersion version)  + + +Parquet.WriteBuilder writeSupport(org.apache.parquet.hadoop.api.WriteSupport<?> newWriteSupport)  @@ -317,6 +325,24 @@

    writerVersion

    public Parquet.WriteBuilder writerVersion(org.apache.parquet.column.ParquetProperties.WriterVersion version)
    + + + +
      +
    • +

      withFileEncryptionKey

      +
      public Parquet.WriteBuilder withFileEncryptionKey(java.nio.ByteBuffer encryptionKey)
      +
    • +
    + + + + diff --git a/javadoc/org/apache/iceberg/parquet/ParquetCodecFactory.html b/javadoc/org/apache/iceberg/parquet/ParquetCodecFactory.html index f00b54f16..e1b4a43a9 100644 --- a/javadoc/org/apache/iceberg/parquet/ParquetCodecFactory.html +++ b/javadoc/org/apache/iceberg/parquet/ParquetCodecFactory.html @@ -119,7 +119,8 @@

    Class ParquetCodecFactorypublic class ParquetCodecFactory extends org.apache.parquet.hadoop.CodecFactory
    This class implements a codec factory that is used when reading from Parquet. It adds a - workaround for memory issues encountered when reading from zstd-compressed files.
    + workaround to cache codecs by name and level, not just by name. This can be removed when this + change is made to Parquet. @@ -194,8 +195,10 @@

    Method Summary

    Method and Description -protected org.apache.parquet.hadoop.CodecFactory.BytesDecompressor -createDecompressor(org.apache.parquet.hadoop.metadata.CompressionCodecName codecName)  +protected org.apache.hadoop.io.compress.CompressionCodec +getCodec(org.apache.parquet.hadoop.metadata.CompressionCodecName codecName) +
    This is copied from CodecFactory and modified to include the level in the cache key.
    +
      @@ -203,7 +206,7 @@

      Method Summary

      Methods inherited from class org.apache.parquet.hadoop.CodecFactory

      -createCompressor, createDirectCodecFactory, getCodec, getCompressor, getDecompressor, release +createCompressor, createDecompressor, createDirectCodecFactory, getCompressor, getDecompressor, release
    • @@ -244,16 +247,17 @@

      ParquetCodecFactory

      Method Detail

      - +
      • -

        createDecompressor

        -
        protected org.apache.parquet.hadoop.CodecFactory.BytesDecompressor createDecompressor(org.apache.parquet.hadoop.metadata.CompressionCodecName codecName)
        +

        getCodec

        +
        protected org.apache.hadoop.io.compress.CompressionCodec getCodec(org.apache.parquet.hadoop.metadata.CompressionCodecName codecName)
        +
        This is copied from CodecFactory and modified to include the level in the cache key.
        Overrides:
        -
        createDecompressor in class org.apache.parquet.hadoop.CodecFactory
        +
        getCodec in class org.apache.parquet.hadoop.CodecFactory
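The description above is about cache granularity: the base CodecFactory caches one codec per codec name, so the same codec used at different compression levels would share a single cached instance. A purely hypothetical sketch of the idea (not the actual Iceberg or Parquet code), using a string cache key built from name and level:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class CodecCacheSketch {
      // Hypothetical stand-in for a codec object; the real code caches
      // org.apache.hadoop.io.compress.CompressionCodec instances.
      static final class Codec {
        final String name;
        final int level;
        Codec(String name, int level) {
          this.name = name;
          this.level = level;
        }
      }

      private final Map<String, Codec> cache = new ConcurrentHashMap<>();

      // Keying on "name:level" keeps codecs configured with different levels apart.
      Codec getCodec(String codecName, int compressionLevel) {
        String key = codecName + ":" + compressionLevel;
        return cache.computeIfAbsent(key, k -> new Codec(codecName, compressionLevel));
      }
    }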
      diff --git a/javadoc/org/apache/iceberg/parquet/ParquetUtil.html b/javadoc/org/apache/iceberg/parquet/ParquetUtil.html index 28f66740d..4ef6e08a8 100644 --- a/javadoc/org/apache/iceberg/parquet/ParquetUtil.html +++ b/javadoc/org/apache/iceberg/parquet/ParquetUtil.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9}; +var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -128,49 +128,55 @@

      Method Summary

      Method and Description +static long +extractTimestampInt96(java.nio.ByteBuffer buffer) +
      Method to read a timestamp (Parquet Int96) from a ByteBuffer.
      + + + static Metrics fileMetrics(InputFile file, MetricsConfig metricsConfig)  - + static Metrics fileMetrics(InputFile file, MetricsConfig metricsConfig, NameMapping nameMapping)  - + static Metrics footerMetrics(org.apache.parquet.hadoop.metadata.ParquetMetadata metadata, java.util.stream.Stream<FieldMetrics<?>> fieldMetrics, MetricsConfig metricsConfig)  - + static Metrics footerMetrics(org.apache.parquet.hadoop.metadata.ParquetMetadata metadata, java.util.stream.Stream<FieldMetrics<?>> fieldMetrics, MetricsConfig metricsConfig, NameMapping nameMapping)  - + static java.util.List<java.lang.Long> getSplitOffsets(org.apache.parquet.hadoop.metadata.ParquetMetadata md)
      Returns a list of offsets in ascending order determined by the starting position of the row groups.
      - + static boolean hasNoBloomFilterPages(org.apache.parquet.hadoop.metadata.ColumnChunkMetaData meta)  - + static boolean hasNonDictionaryPages(org.apache.parquet.hadoop.metadata.ColumnChunkMetaData meta)  - + static boolean isIntType(org.apache.parquet.schema.PrimitiveType primitiveType)  - + static org.apache.parquet.column.Dictionary readDictionary(org.apache.parquet.column.ColumnDescriptor desc, org.apache.parquet.column.page.PageReader pageSource)  @@ -283,12 +289,23 @@

      readDictionary

      -
        +
        • isIntType

          public static boolean isIntType(org.apache.parquet.schema.PrimitiveType primitiveType)
        + + + +
          +
        • +

          extractTimestampInt96

          +
          public static long extractTimestampInt96(java.nio.ByteBuffer buffer)
          +
          Method to read a timestamp (Parquet Int96) from a ByteBuffer. Reads 12 bytes from the buffer: 8 bytes (time-of-day nanos) + 4 bytes (Julian day).
          +
        • +
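A worked sketch of the layout described above: the first 8 bytes of the Int96 value hold nanoseconds within the day and the last 4 bytes hold the Julian day, and converting to microseconds since the Unix epoch uses Julian day 2440588 for 1970-01-01. The little-endian byte order is assumed from Parquet Int96 convention; this illustrates the documented layout and is not necessarily the exact Iceberg implementation.

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import java.util.concurrent.TimeUnit;

    public class Int96TimestampSketch {
      // Julian day number of the Unix epoch (1970-01-01).
      private static final long UNIX_EPOCH_JULIAN_DAY = 2_440_588L;

      // Decodes a 12-byte Parquet Int96 timestamp into microseconds since the epoch.
      static long toMicrosSinceEpoch(ByteBuffer int96) {
        ByteBuffer buf = int96.duplicate().order(ByteOrder.LITTLE_ENDIAN);
        long timeOfDayNanos = buf.getLong(); // first 8 bytes
        long julianDay = buf.getInt();       // last 4 bytes
        return TimeUnit.DAYS.toMicros(julianDay - UNIX_EPOCH_JULIAN_DAY)
            + TimeUnit.NANOSECONDS.toMicros(timeOfDayNanos);
      }
    }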
    • diff --git a/javadoc/org/apache/iceberg/rest/CatalogHandlers.html b/javadoc/org/apache/iceberg/rest/CatalogHandlers.html index dd8be2eab..4043c2433 100644 --- a/javadoc/org/apache/iceberg/rest/CatalogHandlers.html +++ b/javadoc/org/apache/iceberg/rest/CatalogHandlers.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9}; +var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -174,23 +174,29 @@

      Method Summary

      TableIdentifier ident)
        +static LoadTableResponse +registerTable(Catalog catalog, + Namespace namespace, + RegisterTableRequest request)  + + static void renameTable(Catalog catalog, RenameTableRequest request)  - + static LoadTableResponse stageTableCreate(Catalog catalog, Namespace namespace, CreateTableRequest request)  - + static UpdateNamespacePropertiesResponse updateNamespaceProperties(SupportsNamespaces catalog, Namespace namespace, UpdateNamespacePropertiesRequest request)  - + static LoadTableResponse updateTable(Catalog catalog, TableIdentifier ident, @@ -301,6 +307,17 @@

      createTable

      CreateTableRequest request)
    + + + + diff --git a/javadoc/org/apache/iceberg/rest/HTTPClient.html b/javadoc/org/apache/iceberg/rest/HTTPClient.html index 62ec90cf1..abd024d0b 100644 --- a/javadoc/org/apache/iceberg/rest/HTTPClient.html +++ b/javadoc/org/apache/iceberg/rest/HTTPClient.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":41,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10}; -var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]}; +var methods = {"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10}; +var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -147,35 +147,27 @@

    Nested Class Summary

    Method Summary

    - + - - - - - + - + - + - + - + - + - + - + - +
    All Methods Static Methods Instance Methods Concrete Methods Deprecated Methods All Methods Static Methods Instance Methods Concrete Methods 
    Modifier and Type Method and Description
    static HTTPClient.Builderbuilder() -
    Deprecated.  -
    will be removed in 1.3.0; use builder(Map)
    -
    -
    static HTTPClient.Builder builder(java.util.Map<java.lang.String,java.lang.String> properties) 
    void close() 
    <T extends RESTResponse>
    T
    delete(java.lang.String path, java.lang.Class<T> responseType, java.util.Map<java.lang.String,java.lang.String> headers, java.util.function.Consumer<ErrorResponse> errorHandler) 
    <T extends RESTResponse>
    T
    delete(java.lang.String path, java.util.Map<java.lang.String,java.lang.String> queryParams, @@ -183,7 +175,7 @@

    Method Summary

    java.util.Map<java.lang.String,java.lang.String> headers, java.util.function.Consumer<ErrorResponse> errorHandler)
     
    <T extends RESTResponse>
    T
    get(java.lang.String path, java.util.Map<java.lang.String,java.lang.String> queryParams, @@ -191,13 +183,13 @@

    Method Summary

    java.util.Map<java.lang.String,java.lang.String> headers, java.util.function.Consumer<ErrorResponse> errorHandler)
     
    void head(java.lang.String path, java.util.Map<java.lang.String,java.lang.String> headers, java.util.function.Consumer<ErrorResponse> errorHandler) 
    <T extends RESTResponse>
    T
    post(java.lang.String path, RESTRequest body, @@ -205,7 +197,7 @@

    Method Summary

    java.util.Map<java.lang.String,java.lang.String> headers, java.util.function.Consumer<ErrorResponse> errorHandler)
     
    <T extends RESTResponse>
    T
    post(java.lang.String path, RESTRequest body, @@ -214,7 +206,7 @@

    Method Summary

    java.util.function.Consumer<ErrorResponse> errorHandler, java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.String>> responseHeaders)
     
    <T extends RESTResponse>
    T
    postForm(java.lang.String path, java.util.Map<java.lang.String,java.lang.String> formData, @@ -386,21 +378,6 @@

    close

    - - - -
      -
    • -

      builder

      -
      @Deprecated
      -public static HTTPClient.Builder builder()
      -
      Deprecated. will be removed in 1.3.0; use builder(Map)
      -
      -
      Returns:
      -
      http client builder
      -
      -
    • -
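With the no-argument builder() removed above, clients construct HTTPClient through builder(Map). A rough sketch, assuming the builder still exposes uri(...) as in earlier releases; the endpoint URL and the empty properties map are placeholders.

    import java.util.Map;
    import org.apache.iceberg.rest.HTTPClient;
    import org.apache.iceberg.rest.RESTClient;

    public class RestClientSketch {
      // Builds a REST client for a catalog endpoint.
      static RESTClient newClient() {
        Map<String, String> properties = Map.of();
        return HTTPClient.builder(properties)
            .uri("https://localhost:8181/api/catalog")   // placeholder endpoint
            .build();
      }
    }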
    diff --git a/javadoc/org/apache/iceberg/rest/RESTCatalog.html b/javadoc/org/apache/iceberg/rest/RESTCatalog.html index afb1b7918..4e7949ebc 100644 --- a/javadoc/org/apache/iceberg/rest/RESTCatalog.html +++ b/javadoc/org/apache/iceberg/rest/RESTCatalog.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":42,"i30":10,"i31":10,"i32":10}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -166,7 +166,7 @@

    Constructor Summary

    Method Summary

    - + @@ -184,19 +184,27 @@

    Method Summary

    + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - + - + - + - +
    All Methods Instance Methods Concrete Methods Deprecated Methods All Methods Instance Methods Concrete Methods 
    Modifier and Type Method and Description
    voidcommitTransaction(java.util.List<TableCommit> commits) 
    voidcommitTransaction(TableCommit... commits) 
    void createNamespace(Namespace ns, java.util.Map<java.lang.String,java.lang.String> props)
    Create a namespace in the catalog.
    Table createTable(TableIdentifier identifier, Schema schema)
    Create an unpartitioned table.
    Table createTable(TableIdentifier ident, Schema schema, @@ -204,7 +212,7 @@

    Method Summary

    Create a table.
    Table createTable(TableIdentifier ident, Schema schema, @@ -213,7 +221,7 @@

    Method Summary

    Create a table.
    Table createTable(TableIdentifier ident, Schema schema, @@ -223,76 +231,76 @@

    Method Summary

    Create a table.
    boolean dropNamespace(Namespace ns)
    Drop a namespace.
    boolean dropTable(TableIdentifier ident)
    Drop a table and delete all data and metadata files.
    boolean dropTable(TableIdentifier ident, boolean purge)
    Drop a table; optionally delete data and metadata files.
    void initialize(java.lang.String name, java.util.Map<java.lang.String,java.lang.String> props)
    Initialize a catalog given a custom name and a map of catalog properties.
    void invalidateTable(TableIdentifier ident)
    Invalidate cached table metadata from current catalog.
    java.util.List<Namespace> listNamespaces(Namespace ns) -
    List namespaces from the namespace.
    +
    List child namespaces from the namespace.
    java.util.List<TableIdentifier> listTables(Namespace ns)
    Return all the identifiers under this namespace.
    java.util.Map<java.lang.String,java.lang.String> loadNamespaceMetadata(Namespace ns)
    Load metadata properties for a namespace.
    Table loadTable(TableIdentifier ident)
    Load a table.
    java.lang.String name()
    Return the name for this catalog.
    Transaction newCreateTableTransaction(TableIdentifier identifier, Schema schema)
    Start a transaction to create a table.
    Transaction newCreateTableTransaction(TableIdentifier ident, Schema schema, @@ -300,7 +308,7 @@

    Method Summary

    Start a transaction to create a table.
    Transaction newCreateTableTransaction(TableIdentifier ident, Schema schema, @@ -309,7 +317,7 @@

    Method Summary

    Start a transaction to create a table.
    Transaction newCreateTableTransaction(TableIdentifier ident, Schema schema, @@ -319,7 +327,7 @@

    Method Summary

    Start a transaction to create a table.
    Transaction newReplaceTableTransaction(TableIdentifier ident, Schema schema, @@ -327,7 +335,7 @@

    Method Summary

    Start a transaction to replace a table.
    Transaction newReplaceTableTransaction(TableIdentifier ident, Schema schema, @@ -336,7 +344,7 @@

    Method Summary

    Start a transaction to replace a table.
    Transaction newReplaceTableTransaction(TableIdentifier ident, Schema schema, @@ -346,7 +354,7 @@

    Method Summary

    Start a transaction to replace a table.
    Transaction newReplaceTableTransaction(TableIdentifier ident, Schema schema, @@ -357,51 +365,43 @@

    Method Summary

    Start a transaction to replace a table.
    java.util.Map<java.lang.String,java.lang.String> properties() 
    Table registerTable(TableIdentifier ident, java.lang.String metadataFileLocation)
    Register a table with the catalog if it does not exist.
    boolean removeProperties(Namespace ns, java.util.Set<java.lang.String> props)
    Remove a set of property keys from a namespace in the catalog.
    void renameTable(TableIdentifier from, TableIdentifier to)
    Rename a table.
    voidsetConf(org.apache.hadoop.conf.Configuration conf) -
    Deprecated.  -
    will be removed in 1.3.0; use setConf(Object)
    -
    -
    void setConf(java.lang.Object conf) 
    boolean setProperties(Namespace ns, java.util.Map<java.lang.String,java.lang.String> props)
    Set a collection of properties on a namespace in the catalog.
    boolean tableExists(TableIdentifier ident)
    Check whether table exists.
    @@ -1025,15 +1025,34 @@

    listNamespaces

    public java.util.List<Namespace> listNamespaces(Namespace ns)
                                              throws NoSuchNamespaceException
    Description copied from interface: SupportsNamespaces
    -
    List namespaces from the namespace. +
    List child namespaces from the namespace. + +

    For two existing tables named 'a.b.c.table' and 'a.b.d.table', this method returns: + +

      +
    • Given: Namespace.empty() +
    • Returns: Namespace.of("a") +
    -

    For example, if table a.b.t exists, use 'SELECT NAMESPACE IN a' this method must return - Namepace.of("a","b") Namespace.

    +
      +
    • Given: Namespace.of("a") +
    • Returns: Namespace.of("a", "b") +
    + +
      +
    • Given: Namespace.of("a", "b") +
    • Returns: Namespace.of("a", "b", "c") and Namespace.of("a", "b", "d") +
    + +
      +
    • Given: Namespace.of("a", "b", "c") +
    • Returns: empty list, because there are no child namespaces +
    Specified by:
    listNamespaces in interface SupportsNamespaces
    Returns:
    -
    a List of namespace Namespace names
    +
    a List of child Namespace names from the given namespace
    Throws:
    NoSuchNamespaceException - If the namespace does not exist (optional)
    @@ -1144,21 +1163,10 @@

    setConf

    - - - -
      -
    • -

      setConf

      -
      @Deprecated
      -public void setConf(org.apache.hadoop.conf.Configuration conf)
      -
      Deprecated. will be removed in 1.3.0; use setConf(Object)
      -
    • -
    -
      +
      • close

        public void close()
        @@ -1173,6 +1181,24 @@ 

        close

      + + + +
        +
      • +

        commitTransaction

        +
        public void commitTransaction(java.util.List<TableCommit> commits)
        +
      • +
      + + + +
        +
      • +

        commitTransaction

        +
        public void commitTransaction(TableCommit... commits)
        +
      • +
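The two commitTransaction overloads above take the table changes as TableCommit values, either as a list or as varargs. A minimal hedged sketch; it assumes TableCommit lives in org.apache.iceberg.catalog and that the commits themselves are assembled elsewhere, since only the overload signatures come from this change.

    import java.util.List;
    import org.apache.iceberg.catalog.TableCommit;
    import org.apache.iceberg.rest.RESTCatalog;

    public class CommitTransactionSketch {
      // Applies several table commits as one atomic transaction.
      static void commitAll(RESTCatalog catalog, List<TableCommit> commits) {
        catalog.commitTransaction(commits);
      }

      // Same, using the varargs overload.
      static void commitPair(RESTCatalog catalog, TableCommit first, TableCommit second) {
        catalog.commitTransaction(first, second);
      }
    }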
    diff --git a/javadoc/org/apache/iceberg/rest/RESTMessage.html b/javadoc/org/apache/iceberg/rest/RESTMessage.html index a7f029d05..3d21c7d06 100644 --- a/javadoc/org/apache/iceberg/rest/RESTMessage.html +++ b/javadoc/org/apache/iceberg/rest/RESTMessage.html @@ -99,11 +99,11 @@

    Interface RESTMessage

  • All Known Subinterfaces:
    -
    ReportMetricsRequest, RESTRequest, RESTResponse, S3SignRequest, S3SignResponse
    +
    RegisterTableRequest, ReportMetricsRequest, RESTRequest, RESTResponse, S3SignRequest, S3SignResponse
    All Known Implementing Classes:
    -
    ConfigResponse, CreateNamespaceRequest, CreateNamespaceResponse, CreateTableRequest, ErrorResponse, GetNamespaceResponse, ListNamespacesResponse, ListTablesResponse, LoadTableResponse, OAuthTokenResponse, RenameTableRequest, UpdateNamespacePropertiesRequest, UpdateNamespacePropertiesResponse, UpdateTableRequest
    +
    CommitTransactionRequest, ConfigResponse, CreateNamespaceRequest, CreateNamespaceResponse, CreateTableRequest, ErrorResponse, GetNamespaceResponse, ListNamespacesResponse, ListTablesResponse, LoadTableResponse, OAuthTokenResponse, RenameTableRequest, UpdateNamespacePropertiesRequest, UpdateNamespacePropertiesResponse, UpdateTableRequest


    diff --git a/javadoc/org/apache/iceberg/rest/RESTRequest.html b/javadoc/org/apache/iceberg/rest/RESTRequest.html index 9c9f0dd5e..d63cf1872 100644 --- a/javadoc/org/apache/iceberg/rest/RESTRequest.html +++ b/javadoc/org/apache/iceberg/rest/RESTRequest.html @@ -97,11 +97,11 @@

    Interface RESTRequest

    All Known Subinterfaces:
    -
    ReportMetricsRequest, S3SignRequest
    +
    RegisterTableRequest, ReportMetricsRequest, S3SignRequest
    All Known Implementing Classes:
    -
    CreateNamespaceRequest, CreateTableRequest, RenameTableRequest, UpdateNamespacePropertiesRequest, UpdateTableRequest
    +
    CommitTransactionRequest, CreateNamespaceRequest, CreateTableRequest, RenameTableRequest, UpdateNamespacePropertiesRequest, UpdateTableRequest


    diff --git a/javadoc/org/apache/iceberg/rest/RESTSerializers.CommitTransactionRequestDeserializer.html b/javadoc/org/apache/iceberg/rest/RESTSerializers.CommitTransactionRequestDeserializer.html new file mode 100644 index 000000000..76c891886 --- /dev/null +++ b/javadoc/org/apache/iceberg/rest/RESTSerializers.CommitTransactionRequestDeserializer.html @@ -0,0 +1,312 @@ + + + + + +RESTSerializers.CommitTransactionRequestDeserializer + + + + + + + + + + + +
    +
    org.apache.iceberg.rest
    +

    Class RESTSerializers.CommitTransactionRequestDeserializer

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.fasterxml.jackson.databind.JsonDeserializer<CommitTransactionRequest>
      • +
      • +
          +
        • org.apache.iceberg.rest.RESTSerializers.CommitTransactionRequestDeserializer
        • +
        +
      • +
      +
    • +
    +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      com.fasterxml.jackson.databind.deser.NullValueProvider
      +
      +
      +
      Enclosing class:
      +
      RESTSerializers
      +
      +
      +
      +
      public static class RESTSerializers.CommitTransactionRequestDeserializer
      +extends com.fasterxml.jackson.databind.JsonDeserializer<CommitTransactionRequest>
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Nested Class Summary

        +
          +
        • + + +

          Nested classes/interfaces inherited from class com.fasterxml.jackson.databind.JsonDeserializer

          +com.fasterxml.jackson.databind.JsonDeserializer.None
        • +
        +
      • +
      + + + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        CommitTransactionRequestdeserialize(com.fasterxml.jackson.core.JsonParser p, + com.fasterxml.jackson.databind.DeserializationContext context) 
        +
          +
        • + + +

          Methods inherited from class com.fasterxml.jackson.databind.JsonDeserializer

          +deserialize, deserializeWithType, deserializeWithType, findBackReference, getAbsentValue, getDelegatee, getEmptyAccessPattern, getEmptyValue, getEmptyValue, getKnownPropertyNames, getNullAccessPattern, getNullValue, getNullValue, getObjectIdReader, handledType, isCachable, logicalType, replaceDelegatee, supportsUpdate, unwrappingDeserializer
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          CommitTransactionRequestDeserializer

          +
          public CommitTransactionRequestDeserializer()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          deserialize

          +
          public CommitTransactionRequest deserialize(com.fasterxml.jackson.core.JsonParser p,
          +                                            com.fasterxml.jackson.databind.DeserializationContext context)
          +                                     throws java.io.IOException
          +
          +
          Specified by:
          +
          deserialize in class com.fasterxml.jackson.databind.JsonDeserializer<CommitTransactionRequest>
          +
          Throws:
          +
          java.io.IOException
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + + + + + + diff --git a/javadoc/org/apache/iceberg/rest/RESTSerializers.CommitTransactionRequestSerializer.html b/javadoc/org/apache/iceberg/rest/RESTSerializers.CommitTransactionRequestSerializer.html new file mode 100644 index 000000000..da84a08ae --- /dev/null +++ b/javadoc/org/apache/iceberg/rest/RESTSerializers.CommitTransactionRequestSerializer.html @@ -0,0 +1,314 @@ + + + + + +RESTSerializers.CommitTransactionRequestSerializer + + + + + + + + + + + +
    +
    org.apache.iceberg.rest
    +

    Class RESTSerializers.CommitTransactionRequestSerializer

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.fasterxml.jackson.databind.JsonSerializer<CommitTransactionRequest>
      • +
      • +
          +
        • org.apache.iceberg.rest.RESTSerializers.CommitTransactionRequestSerializer
        • +
        +
      • +
      +
    • +
    +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      com.fasterxml.jackson.databind.jsonFormatVisitors.JsonFormatVisitable
      +
      +
      +
      Enclosing class:
      +
      RESTSerializers
      +
      +
      +
      +
      public static class RESTSerializers.CommitTransactionRequestSerializer
      +extends com.fasterxml.jackson.databind.JsonSerializer<CommitTransactionRequest>
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Nested Class Summary

        +
          +
        • + + +

          Nested classes/interfaces inherited from class com.fasterxml.jackson.databind.JsonSerializer

          +com.fasterxml.jackson.databind.JsonSerializer.None
        • +
        +
      • +
      + + + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        voidserialize(CommitTransactionRequest request, + com.fasterxml.jackson.core.JsonGenerator gen, + com.fasterxml.jackson.databind.SerializerProvider serializers) 
        +
          +
        • + + +

          Methods inherited from class com.fasterxml.jackson.databind.JsonSerializer

          +acceptJsonFormatVisitor, getDelegatee, handledType, isEmpty, isEmpty, isUnwrappingSerializer, properties, replaceDelegatee, serializeWithType, unwrappingSerializer, usesObjectId, withFilterId
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          CommitTransactionRequestSerializer

          +
          public CommitTransactionRequestSerializer()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          serialize

          +
          public void serialize(CommitTransactionRequest request,
          +                      com.fasterxml.jackson.core.JsonGenerator gen,
          +                      com.fasterxml.jackson.databind.SerializerProvider serializers)
          +               throws java.io.IOException
          +
          +
          Specified by:
          +
          serialize in class com.fasterxml.jackson.databind.JsonSerializer<CommitTransactionRequest>
          +
          Throws:
          +
          java.io.IOException
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + + + + + + diff --git a/javadoc/org/apache/iceberg/rest/RESTSerializers.ErrorResponseDeserializer.html b/javadoc/org/apache/iceberg/rest/RESTSerializers.ErrorResponseDeserializer.html index 99c9f109b..2c6183cf0 100644 --- a/javadoc/org/apache/iceberg/rest/RESTSerializers.ErrorResponseDeserializer.html +++ b/javadoc/org/apache/iceberg/rest/RESTSerializers.ErrorResponseDeserializer.html @@ -47,7 +47,7 @@
  • Constructor and Description
    UpdateRequirementDeserializer() UpdateRequirementDeserializer() +
    Deprecated. 
    @@ -167,7 +173,7 @@

    Constructor Summary

    Method Summary

    - + @@ -175,7 +181,9 @@

    Method Summary

    + com.fasterxml.jackson.databind.DeserializationContext ctxt) +
    Deprecated. 
    All Methods Instance Methods Concrete Methods All Methods Instance Methods Concrete Methods Deprecated Methods 
    Modifier and Type Method and Description
    UpdateTableRequest.UpdateRequirement deserialize(com.fasterxml.jackson.core.JsonParser p, - com.fasterxml.jackson.databind.DeserializationContext ctxt) 
      @@ -213,6 +221,7 @@

      Constructor Detail

    • UpdateRequirementDeserializer

      public UpdateRequirementDeserializer()
      +
      Deprecated. 
    @@ -232,6 +241,7 @@

    deserialize

    public UpdateTableRequest.UpdateRequirement deserialize(com.fasterxml.jackson.core.JsonParser p,
                                                             com.fasterxml.jackson.databind.DeserializationContext ctxt)
                                                      throws java.io.IOException
    +
    Deprecated. 
    Specified by:
    deserialize in class com.fasterxml.jackson.databind.JsonDeserializer<UpdateTableRequest.UpdateRequirement>
    diff --git a/javadoc/org/apache/iceberg/rest/RESTSerializers.UpdateRequirementSerializer.html b/javadoc/org/apache/iceberg/rest/RESTSerializers.UpdateRequirementSerializer.html index a3ebef8ce..71ff19994 100644 --- a/javadoc/org/apache/iceberg/rest/RESTSerializers.UpdateRequirementSerializer.html +++ b/javadoc/org/apache/iceberg/rest/RESTSerializers.UpdateRequirementSerializer.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":10}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; +var methods = {"i0":42}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -48,7 +48,7 @@

    +
    Deprecated.  +
    will be removed in 1.5.0, use UpdateReqSerializer instead.
    +

    -
    public static class RESTSerializers.UpdateRequirementSerializer
    +
    @Deprecated
    +public static class RESTSerializers.UpdateRequirementSerializer
     extends com.fasterxml.jackson.databind.JsonSerializer<UpdateTableRequest.UpdateRequirement>
    @@ -155,7 +159,9 @@

    Constructor Summary

    Constructor and Description
    UpdateRequirementSerializer() UpdateRequirementSerializer() +
    Deprecated. 
    @@ -167,7 +173,7 @@

    Constructor Summary

    Method Summary

    - + @@ -176,7 +182,9 @@

    Method Summary

    + com.fasterxml.jackson.databind.SerializerProvider serializers) +
    Deprecated. 
    All Methods Instance Methods Concrete Methods All Methods Instance Methods Concrete Methods Deprecated Methods 
    Modifier and Type Method and Description void serialize(UpdateTableRequest.UpdateRequirement value, com.fasterxml.jackson.core.JsonGenerator gen, - com.fasterxml.jackson.databind.SerializerProvider serializers) 
      @@ -214,6 +222,7 @@

      Constructor Detail

    • UpdateRequirementSerializer

      public UpdateRequirementSerializer()
      +
      Deprecated. 
    @@ -234,6 +243,7 @@

    serialize

    com.fasterxml.jackson.core.JsonGenerator gen, com.fasterxml.jackson.databind.SerializerProvider serializers) throws java.io.IOException +
    Deprecated. 
    Specified by:
    serialize in class com.fasterxml.jackson.databind.JsonSerializer<UpdateTableRequest.UpdateRequirement>
    @@ -270,7 +280,7 @@

    serialize

    + + + + + + + diff --git a/javadoc/org/apache/iceberg/rest/requests/CommitTransactionRequestParser.html b/javadoc/org/apache/iceberg/rest/requests/CommitTransactionRequestParser.html new file mode 100644 index 000000000..9ae080d31 --- /dev/null +++ b/javadoc/org/apache/iceberg/rest/requests/CommitTransactionRequestParser.html @@ -0,0 +1,295 @@ + + + + + +CommitTransactionRequestParser + + + + + + + + + + + +
    +
    org.apache.iceberg.rest.requests
    +

    Class CommitTransactionRequestParser

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • org.apache.iceberg.rest.requests.CommitTransactionRequestParser
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class CommitTransactionRequestParser
      +extends java.lang.Object
      +
    • +
    +
    +
    + +
    +
    + +
    +
    + + + + + + + diff --git a/javadoc/org/apache/iceberg/rest/requests/CreateNamespaceRequest.html b/javadoc/org/apache/iceberg/rest/requests/CreateNamespaceRequest.html index 159e13d70..2b578bd61 100644 --- a/javadoc/org/apache/iceberg/rest/requests/CreateNamespaceRequest.html +++ b/javadoc/org/apache/iceberg/rest/requests/CreateNamespaceRequest.html @@ -47,7 +47,7 @@

    +
    Deprecated.  +
    will be removed in 1.5.0, use UpdateRequirements instead.
    +

    -
    public static class UpdateTableRequest.Builder
    +
    @Deprecated
    +public static class UpdateTableRequest.Builder
     extends java.lang.Object
    @@ -132,7 +137,9 @@

    Constructor Summary

    Builder(TableMetadata base, - boolean isReplace)  + boolean isReplace)
    +
    Deprecated. 
    +  @@ -144,18 +151,22 @@

    Constructor Summary

    Method Summary

    - + - + - +
    All Methods Instance Methods Concrete Methods All Methods Instance Methods Concrete Methods Deprecated Methods 
    Modifier and Type Method and Description
    UpdateTableRequestbuild() build() +
    Deprecated. 
    UpdateTableRequest.Builderupdate(MetadataUpdate update) update(MetadataUpdate update) +
    Deprecated. 
      @@ -187,6 +198,7 @@

      Constructor Detail

      Builder

      public Builder(TableMetadata base,
                      boolean isReplace)
      +
      Deprecated. 
    @@ -204,6 +216,7 @@

    Method Detail

  • update

    public UpdateTableRequest.Builder update(MetadataUpdate update)
    +
    Deprecated. 
  • @@ -213,6 +226,7 @@

    update

  • build

    public UpdateTableRequest build()
    +
    Deprecated. 
  • diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertCurrentSchemaID.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertCurrentSchemaID.html index 79428e09b..416b8001b 100644 --- a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertCurrentSchemaID.html +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertCurrentSchemaID.html @@ -107,7 +107,7 @@

    All Implemented Interfaces:
    -
    UpdateTableRequest.UpdateRequirement
    +
    UpdateTableRequest.UpdateRequirement, UpdateRequirement
    Enclosing interface:
    @@ -199,7 +199,7 @@

    validate

    public void validate(TableMetadata base)
    Specified by:
    -
    validate in interface UpdateTableRequest.UpdateRequirement
    +
    validate in interface UpdateRequirement
    diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertDefaultSortOrderID.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertDefaultSortOrderID.html index b9780545d..5144b05e6 100644 --- a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertDefaultSortOrderID.html +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertDefaultSortOrderID.html @@ -107,7 +107,7 @@

    All Implemented Interfaces:
    -
    UpdateTableRequest.UpdateRequirement
    +
    UpdateTableRequest.UpdateRequirement, UpdateRequirement
    Enclosing interface:
    @@ -199,7 +199,7 @@

    validate

    public void validate(TableMetadata base)
    Specified by:
    -
    validate in interface UpdateTableRequest.UpdateRequirement
    +
    validate in interface UpdateRequirement
    diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertDefaultSpecID.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertDefaultSpecID.html index 98ded5c0d..85e8c63d5 100644 --- a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertDefaultSpecID.html +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertDefaultSpecID.html @@ -107,7 +107,7 @@

    All Implemented Interfaces:
    -
    UpdateTableRequest.UpdateRequirement
    +
    UpdateTableRequest.UpdateRequirement, UpdateRequirement
    Enclosing interface:
    @@ -199,7 +199,7 @@

    validate

    public void validate(TableMetadata base)
    Specified by:
    -
    validate in interface UpdateTableRequest.UpdateRequirement
    +
    validate in interface UpdateRequirement
    diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertLastAssignedFieldId.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertLastAssignedFieldId.html index 9404799a1..74c3ff915 100644 --- a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertLastAssignedFieldId.html +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertLastAssignedFieldId.html @@ -107,7 +107,7 @@

    All Implemented Interfaces:
    -
    UpdateTableRequest.UpdateRequirement
    +
    UpdateTableRequest.UpdateRequirement, UpdateRequirement
    Enclosing interface:
    @@ -233,7 +233,7 @@

    validate

    public void validate(TableMetadata base)
    Specified by:
    -
    validate in interface UpdateTableRequest.UpdateRequirement
    +
    validate in interface UpdateRequirement
    diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertLastAssignedPartitionId.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertLastAssignedPartitionId.html index b0ed2cef5..bf8f0c635 100644 --- a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertLastAssignedPartitionId.html +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertLastAssignedPartitionId.html @@ -107,7 +107,7 @@

    All Implemented Interfaces:
    -
    UpdateTableRequest.UpdateRequirement
    +
    UpdateTableRequest.UpdateRequirement, UpdateRequirement
    Enclosing interface:
    @@ -233,7 +233,7 @@

    validate

    public void validate(TableMetadata base)
    Specified by:
    -
    validate in interface UpdateTableRequest.UpdateRequirement
    +
    validate in interface UpdateRequirement
    diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID.html index 04f53c516..ed88456cc 100644 --- a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID.html +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID.html @@ -107,7 +107,7 @@

    All Implemented Interfaces:
    -
    UpdateTableRequest.UpdateRequirement
    +
    UpdateTableRequest.UpdateRequirement, UpdateRequirement
    Enclosing interface:
    @@ -212,7 +212,7 @@

    validate

    public void validate(TableMetadata base)
    Specified by:
    -
    validate in interface UpdateTableRequest.UpdateRequirement
    +
    validate in interface UpdateRequirement
    diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist.html index 883ee815f..f224f135a 100644 --- a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist.html +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist.html @@ -107,7 +107,7 @@

    All Implemented Interfaces:
    -
    UpdateTableRequest.UpdateRequirement
    +
    UpdateTableRequest.UpdateRequirement, UpdateRequirement
    Enclosing interface:
    @@ -186,7 +186,7 @@

    validate

    public void validate(TableMetadata base)
    Specified by:
    -
    validate in interface UpdateTableRequest.UpdateRequirement
    +
    validate in interface UpdateRequirement
    diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertTableUUID.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertTableUUID.html index 79383fd92..43522f60e 100644 --- a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertTableUUID.html +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.UpdateRequirement.AssertTableUUID.html @@ -48,7 +48,7 @@

    +
    Deprecated.  +
    will be removed in 1.5.0, use UpdateRequirement instead.
    +

    -
    public static interface UpdateTableRequest.UpdateRequirement
    +
    @Deprecated
    +public static interface UpdateTableRequest.UpdateRequirement
    +extends UpdateRequirement
    @@ -128,35 +131,51 @@

    Nested Class Summary

    static class  -UpdateTableRequest.UpdateRequirement.AssertCurrentSchemaID  +UpdateTableRequest.UpdateRequirement.AssertCurrentSchemaID +
    Deprecated. 
    static class  -UpdateTableRequest.UpdateRequirement.AssertDefaultSortOrderID  +UpdateTableRequest.UpdateRequirement.AssertDefaultSortOrderID +
    Deprecated. 
    static class  -UpdateTableRequest.UpdateRequirement.AssertDefaultSpecID  +UpdateTableRequest.UpdateRequirement.AssertDefaultSpecID +
    Deprecated. 
    static class  -UpdateTableRequest.UpdateRequirement.AssertLastAssignedFieldId  +UpdateTableRequest.UpdateRequirement.AssertLastAssignedFieldId +
    Deprecated. 
    static class  -UpdateTableRequest.UpdateRequirement.AssertLastAssignedPartitionId  +UpdateTableRequest.UpdateRequirement.AssertLastAssignedPartitionId +
    Deprecated. 
    static class  -UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID  +UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID +
    Deprecated. 
    static class  -UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist  +UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist +
    Deprecated. 
    static class  -UpdateTableRequest.UpdateRequirement.AssertTableUUID  +UpdateTableRequest.UpdateRequirement.AssertTableUUID +
    Deprecated. 
    +  @@ -167,39 +186,12 @@

    Nested Class Summary

    Method Summary

    - - - - - - - - - - -
    All Methods Instance Methods Abstract Methods 
    Modifier and TypeMethod and Description
    voidvalidate(TableMetadata base) 
    - - - - - -
    -
    diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.html index 4ddb619d1..996c3703d 100644 --- a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.html +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequest.html @@ -17,8 +17,8 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":9,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10}; -var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; +var methods = {"i0":41,"i1":41,"i2":41,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10}; +var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -134,11 +134,20 @@

    Nested Class Summary

    static class  -
    UpdateTableRequest.Builder  +UpdateTableRequest.Builder +
    Deprecated.  +
    will be removed in 1.5.0, use UpdateRequirements + instead.
    +
    + static interface  -UpdateTableRequest.UpdateRequirement  +UpdateTableRequest.UpdateRequirement +
    Deprecated.  +
    will be removed in 1.5.0, use UpdateRequirement instead.
    +
    + @@ -158,7 +167,7 @@

    Constructor Summary

    UpdateTableRequest()  -UpdateTableRequest(java.util.List<UpdateTableRequest.UpdateRequirement> requirements, +UpdateTableRequest(java.util.List<UpdateRequirement> requirements, java.util.List<MetadataUpdate> updates)  @@ -171,36 +180,58 @@

    Constructor Summary

    Method Summary

    - + - + - + - + - - + + + + + + + + + + - + - + - + - + + + + +
    All Methods Static Methods Instance Methods Concrete Methods All Methods Static Methods Instance Methods Concrete Methods Deprecated Methods 
    Modifier and Type Method and Description
    static UpdateTableRequest.BuilderbuilderFor(TableMetadata base) builderFor(TableMetadata base) +
    Deprecated.  +
    will be removed in 1.5.0, use UpdateRequirements.forUpdateTable(TableMetadata, List) instead.
    +
    +
    static UpdateTableRequest.BuilderbuilderForCreate() builderForCreate() +
    Deprecated.  +
    will be removed in 1.5.0, use UpdateRequirements.forCreateTable(List) instead.
    +
    +
    static UpdateTableRequest.BuilderbuilderForReplace(TableMetadata base) builderForReplace(TableMetadata base) +
    Deprecated.  +
    will be removed in 1.5.0, use UpdateRequirements.forReplaceTable(TableMetadata, List) instead.
    +
    +
    java.util.List<UpdateTableRequest.UpdateRequirement>requirements() static UpdateTableRequestcreate(TableIdentifier identifier, + java.util.List<UpdateRequirement> requirements, + java.util.List<MetadataUpdate> updates) 
    TableIdentifieridentifier() 
    java.util.List<UpdateRequirement>requirements() 
    java.lang.String toString() 
    java.util.List<MetadataUpdate> updates() 
    void validate()
    Ensures that a constructed instance of a REST message is valid according to the REST spec.
    @@ -243,7 +274,7 @@

    UpdateTableRequest

    @@ -279,7 +310,7 @@

    validate

    @@ -291,6 +322,15 @@

    updates

    public java.util.List<MetadataUpdate> updates()
    + + + + @@ -304,13 +344,26 @@

    toString

    + + + + @@ -319,7 +372,9 @@

    builderForCreate

    @@ -328,7 +383,9 @@

    builderForReplace

    diff --git a/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequestParser.html b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequestParser.html new file mode 100644 index 000000000..05aa5f8e0 --- /dev/null +++ b/javadoc/org/apache/iceberg/rest/requests/UpdateTableRequestParser.html @@ -0,0 +1,295 @@ + + + + + +UpdateTableRequestParser + + + + + + + + + + + +
    +
    org.apache.iceberg.rest.requests
    +

    Class UpdateTableRequestParser

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • org.apache.iceberg.rest.requests.UpdateTableRequestParser
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class UpdateTableRequestParser
      +extends java.lang.Object
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          toJson

          +
          public static java.lang.String toJson(UpdateTableRequest request,
          +                                      boolean pretty)
          +
        • +
        + + + +
          +
        • +

          toJson

          +
          public static void toJson(UpdateTableRequest request,
          +                          com.fasterxml.jackson.core.JsonGenerator gen)
          +                   throws java.io.IOException
          +
          +
          Throws:
          +
          java.io.IOException
          +
          +
        • +
        + + + + + + + +
          +
        • +

          fromJson

          +
          public static UpdateTableRequest fromJson(com.fasterxml.jackson.databind.JsonNode json)
          +
        • +
        +
      • +
      +
    • +
    +
    +
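A short round-trip sketch for the new parser, using the toJson(request, pretty) and fromJson(JsonNode) overloads shown above; the ObjectMapper here is only a convenience for the example:

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.iceberg.rest.requests.UpdateTableRequest;
    import org.apache.iceberg.rest.requests.UpdateTableRequestParser;

    class ParserRoundTrip {
      // Serializes a request to pretty-printed JSON and parses it back.
      static UpdateTableRequest roundTrip(UpdateTableRequest request) throws Exception {
        String json = UpdateTableRequestParser.toJson(request, true);
        JsonNode node = new ObjectMapper().readTree(json);
        return UpdateTableRequestParser.fromJson(node);
      }
    }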
    + + + + + + + diff --git a/javadoc/org/apache/iceberg/rest/requests/package-frame.html b/javadoc/org/apache/iceberg/rest/requests/package-frame.html index 04c375b1e..cd617fc59 100644 --- a/javadoc/org/apache/iceberg/rest/requests/package-frame.html +++ b/javadoc/org/apache/iceberg/rest/requests/package-frame.html @@ -12,15 +12,19 @@

    Interfaces

    Classes

    Enums

      diff --git a/javadoc/org/apache/iceberg/rest/requests/package-summary.html b/javadoc/org/apache/iceberg/rest/requests/package-summary.html index dac21ee13..3e6dc83c2 100644 --- a/javadoc/org/apache/iceberg/rest/requests/package-summary.html +++ b/javadoc/org/apache/iceberg/rest/requests/package-summary.html @@ -81,13 +81,19 @@

Package org.apache.iceberg.rest.requests

    ReportMetricsRequestRegisterTableRequest  
    UpdateTableRequest.UpdateRequirementReportMetricsRequest  
    UpdateTableRequest.UpdateRequirementDeprecated +
    will be removed in 1.5.0, use UpdateRequirement instead.
    +
    @@ -100,6 +106,14 @@

    Package org.apache.iceberg.rest.requests< +CommitTransactionRequest +  + + +CommitTransactionRequestParser +  + + CreateNamespaceRequest
    A REST request to create a namespace, with an optional set of properties.
    @@ -121,71 +135,85 @@

    Package org.apache.iceberg.rest.requests<   +RegisterTableRequestParser +  + + RenameTableRequest
    A REST request to rename a table.
    - + RenameTableRequest.Builder   - + ReportMetricsRequestParser   - + UpdateNamespacePropertiesRequest
    A REST request to set and/or remove properties on a namespace.
    - + UpdateNamespacePropertiesRequest.Builder   - + UpdateRequirementParser -  +Deprecated +
    will be removed in 1.5.0, use UpdateRequirementParser + instead.
    + - + UpdateTableRequest   - + UpdateTableRequest.Builder +Deprecated +
    will be removed in 1.5.0, use UpdateRequirements + instead.
    + + + +UpdateTableRequest.UpdateRequirement.AssertCurrentSchemaID   -UpdateTableRequest.UpdateRequirement.AssertCurrentSchemaID +UpdateTableRequest.UpdateRequirement.AssertDefaultSortOrderID   -UpdateTableRequest.UpdateRequirement.AssertDefaultSortOrderID +UpdateTableRequest.UpdateRequirement.AssertDefaultSpecID   -UpdateTableRequest.UpdateRequirement.AssertDefaultSpecID +UpdateTableRequest.UpdateRequirement.AssertLastAssignedFieldId   -UpdateTableRequest.UpdateRequirement.AssertLastAssignedFieldId +UpdateTableRequest.UpdateRequirement.AssertLastAssignedPartitionId   -UpdateTableRequest.UpdateRequirement.AssertLastAssignedPartitionId +UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID   -UpdateTableRequest.UpdateRequirement.AssertRefSnapshotID +UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist   -UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist +UpdateTableRequest.UpdateRequirement.AssertTableUUID   -UpdateTableRequest.UpdateRequirement.AssertTableUUID +UpdateTableRequestParser   diff --git a/javadoc/org/apache/iceberg/rest/requests/package-tree.html b/javadoc/org/apache/iceberg/rest/requests/package-tree.html index db3b8a7b1..6941c2e58 100644 --- a/javadoc/org/apache/iceberg/rest/requests/package-tree.html +++ b/javadoc/org/apache/iceberg/rest/requests/package-tree.html @@ -79,10 +79,13 @@

    Class Hierarchy

    @@ -108,13 +112,18 @@

    Interface Hierarchy

    +
  • org.apache.iceberg.UpdateRequirement + +
  • +

    Enum Hierarchy

    • java.lang.Object diff --git a/javadoc/org/apache/iceberg/snowflake/SnowflakeCatalog.html b/javadoc/org/apache/iceberg/snowflake/SnowflakeCatalog.html index 2aaacea65..9739900e1 100644 --- a/javadoc/org/apache/iceberg/snowflake/SnowflakeCatalog.html +++ b/javadoc/org/apache/iceberg/snowflake/SnowflakeCatalog.html @@ -214,7 +214,7 @@

      Method Summary

      java.util.List<Namespace> listNamespaces(Namespace namespace) -
      List namespaces from the namespace.
      +
      List child namespaces from the namespace.
      @@ -447,15 +447,34 @@

      createNamespace

      listNamespaces

      public java.util.List<Namespace> listNamespaces(Namespace namespace)
      Description copied from interface: SupportsNamespaces
      -
      List namespaces from the namespace. +
      List child namespaces from the namespace. -

      For example, if table a.b.t exists, use 'SELECT NAMESPACE IN a' this method must return - Namepace.of("a","b") Namespace.

      +

      For two existing tables named 'a.b.c.table' and 'a.b.d.table', this method returns: + +

        +
      • Given: Namespace.empty() +
      • Returns: Namespace.of("a") +
      + +
        +
      • Given: Namespace.of("a") +
      • Returns: Namespace.of("a", "b") +
      + +
        +
      • Given: Namespace.of("a", "b") +
      • Returns: Namespace.of("a", "b", "c") and Namespace.of("a", "b", "d") +
      + +
        +
      • Given: Namespace.of("a", "b", "c") +
      • Returns: empty list, because there are no child namespaces +
      Specified by:
      listNamespaces in interface SupportsNamespaces
      Returns:
      -
      a List of namespace Namespace names
      +
      a List of child Namespace names from the given namespace
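The clarified contract above translates directly into catalog code; a minimal sketch that lists one level of child namespaces through the SupportsNamespaces interface:

    import java.util.List;
    import org.apache.iceberg.catalog.Namespace;
    import org.apache.iceberg.catalog.SupportsNamespaces;

    class ListChildNamespaces {
      // Prints the direct children of the given namespace, e.g. for tables
      // a.b.c.table and a.b.d.table, parent Namespace.of("a", "b") yields
      // Namespace.of("a", "b", "c") and Namespace.of("a", "b", "d").
      static void printChildren(SupportsNamespaces catalog, Namespace parent) {
        List<Namespace> children = catalog.listNamespaces(parent);
        for (Namespace child : children) {
          System.out.println(child);
        }
      }
    }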
    diff --git a/javadoc/org/apache/iceberg/spark/ChangelogIterator.html b/javadoc/org/apache/iceberg/spark/ChangelogIterator.html index 44b9e586f..7da21eaa6 100644 --- a/javadoc/org/apache/iceberg/spark/ChangelogIterator.html +++ b/javadoc/org/apache/iceberg/spark/ChangelogIterator.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":10,"i2":10}; +var methods = {"i0":10,"i1":10,"i2":9,"i3":9,"i4":10,"i5":10,"i6":9,"i7":9,"i8":10,"i9":10}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -73,14 +73,14 @@ @@ -109,42 +109,73 @@

    Class ChangelogIterator

    All Implemented Interfaces:
    java.util.Iterator<org.apache.spark.sql.Row>
    +
    +
    Direct Known Subclasses:
    +
    ComputeUpdateIterator, RemoveNetCarryoverIterator
    +


    -
    public class ChangelogIterator
    +
    public abstract class ChangelogIterator
     extends java.lang.Object
     implements java.util.Iterator<org.apache.spark.sql.Row>
    -
    An iterator that transforms rows from changelog tables within a single Spark task. It assumes - that rows are sorted by identifier columns and change type. - -

    It removes the carry-over rows. Carry-over rows are the result of a removal and insertion of - the same row within an operation because of the copy-on-write mechanism. For example, given a - file which contains row1 (id=1, data='a') and row2 (id=2, data='b'). A copy-on-write delete of - row2 would require erasing this file and preserving row1 in a new file. The change-log table - would report this as (id=1, data='a', op='DELETE') and (id=1, data='a', op='INSERT'), despite it - not being an actual change to the table. The iterator finds the carry-over rows and removes them - from the result. - -

    This iterator also finds delete/insert rows which represent an update, and converts them into - update records. For example, these two rows - -

      -
    • (id=1, data='a', op='DELETE') -
    • (id=1, data='b', op='INSERT') -
    - -

    will be marked as update-rows: - -

      -
    • (id=1, data='a', op='UPDATE_BEFORE') -
    • (id=1, data='b', op='UPDATE_AFTER') -
    +
    An iterator that transforms rows from changelog tables within a single Spark task.
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + + + + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        protected static java.lang.StringDELETE 
        protected static java.lang.StringINSERT 
        protected static java.lang.StringUPDATE_AFTER 
        protected static java.lang.StringUPDATE_BEFORE 
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + +
        Constructors 
        ModifierConstructor and Description
        protected ChangelogIterator(java.util.Iterator<org.apache.spark.sql.Row> rowIterator, + org.apache.spark.sql.types.StructType rowType) 
        +
      • +
      • @@ -158,20 +189,58 @@

        Method Summary

        Method and Description -static java.util.Iterator<org.apache.spark.sql.Row> -
        create(java.util.Iterator<org.apache.spark.sql.Row> rowIterator, - org.apache.spark.sql.types.StructType rowType, - java.lang.String[] identifierFields) -
        Creates an iterator for records of a changelog table.
        - +protected java.lang.String +changeType(org.apache.spark.sql.Row row)  -boolean -hasNext()  +protected int +changeTypeIndex()  -org.apache.spark.sql.Row -next()  +static java.util.Iterator<org.apache.spark.sql.Row> +computeUpdates(java.util.Iterator<org.apache.spark.sql.Row> rowIterator, + org.apache.spark.sql.types.StructType rowType, + java.lang.String[] identifierFields) +
        Creates an iterator composing RemoveCarryoverIterator and ComputeUpdateIterator + to remove carry-over rows and compute update rows
        + + + +protected static int[] +generateIndicesToIdentifySameRow(int totalColumnCount, + java.util.Set<java.lang.Integer> metadataColumnIndices)  + + +protected boolean +isDifferentValue(org.apache.spark.sql.Row currentRow, + org.apache.spark.sql.Row nextRow, + int idx)  + + +protected boolean +isSameRecord(org.apache.spark.sql.Row currentRow, + org.apache.spark.sql.Row nextRow, + int[] indicesToIdentifySameRow)  + + +static java.util.Iterator<org.apache.spark.sql.Row> +removeCarryovers(java.util.Iterator<org.apache.spark.sql.Row> rowIterator, + org.apache.spark.sql.types.StructType rowType) +
        Creates an iterator that removes carry-over rows from a changelog table.
        + + + +static java.util.Iterator<org.apache.spark.sql.Row> +removeNetCarryovers(java.util.Iterator<org.apache.spark.sql.Row> rowIterator, + org.apache.spark.sql.types.StructType rowType)  + + +protected java.util.Iterator<org.apache.spark.sql.Row> +rowIterator()  + + +protected org.apache.spark.sql.types.StructType +rowType() 
          @@ -186,7 +255,7 @@

          Methods inherited from class java.lang.Object

          Methods inherited from interface java.util.Iterator

          -forEachRemaining, remove +forEachRemaining, hasNext, next, remove
      @@ -196,22 +265,121 @@

      Methods inherited from interface java.util.Iterator

      • + +
          +
        • + + +

          Field Detail

          + + + +
            +
          • +

            DELETE

            +
            protected static final java.lang.String DELETE
            +
          • +
          + + + +
            +
          • +

            INSERT

            +
            protected static final java.lang.String INSERT
            +
          • +
          + + + +
            +
          • +

            UPDATE_BEFORE

            +
            protected static final java.lang.String UPDATE_BEFORE
            +
          • +
          + + + +
            +
          • +

            UPDATE_AFTER

            +
            protected static final java.lang.String UPDATE_AFTER
            +
          • +
          +
        • +
        + +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ChangelogIterator

            +
            protected ChangelogIterator(java.util.Iterator<org.apache.spark.sql.Row> rowIterator,
            +                            org.apache.spark.sql.types.StructType rowType)
            +
          • +
          +
        • +
        • Method Detail

          - + + + +
            +
          • +

            changeTypeIndex

            +
            protected int changeTypeIndex()
            +
          • +
          + + + +
            +
          • +

            rowType

            +
            protected org.apache.spark.sql.types.StructType rowType()
            +
          • +
          +
          • -

            create

            -
            public static java.util.Iterator<org.apache.spark.sql.Row> create(java.util.Iterator<org.apache.spark.sql.Row> rowIterator,
            -                                                                  org.apache.spark.sql.types.StructType rowType,
            -                                                                  java.lang.String[] identifierFields)
            -
            Creates an iterator for records of a changelog table.
            +

            changeType

            +
            protected java.lang.String changeType(org.apache.spark.sql.Row row)
            +
          • +
          + + + +
            +
          • +

            rowIterator

            +
            protected java.util.Iterator<org.apache.spark.sql.Row> rowIterator()
            +
          • +
          + + + +
            +
          • +

            computeUpdates

            +
            public static java.util.Iterator<org.apache.spark.sql.Row> computeUpdates(java.util.Iterator<org.apache.spark.sql.Row> rowIterator,
            +                                                                          org.apache.spark.sql.types.StructType rowType,
            +                                                                          java.lang.String[] identifierFields)
            +
            Creates an iterator composing RemoveCarryoverIterator and ComputeUpdateIterator + to remove carry-over rows and compute update rows
            Parameters:
            rowIterator - the iterator of rows from a changelog table
            @@ -219,34 +387,68 @@

            create

            identifierFields - the names of the identifier columns, which determine if rows are the same
            Returns:
            -
            a new ChangelogIterator instance concatenated with the null-removal iterator
            +
            a new iterator instance
          - +
          • -

            hasNext

            -
            public boolean hasNext()
            +

            removeCarryovers

            +
            public static java.util.Iterator<org.apache.spark.sql.Row> removeCarryovers(java.util.Iterator<org.apache.spark.sql.Row> rowIterator,
            +                                                                            org.apache.spark.sql.types.StructType rowType)
            +
            Creates an iterator that removes carry-over rows from a changelog table.
            -
            Specified by:
            -
            hasNext in interface java.util.Iterator<org.apache.spark.sql.Row>
            +
            Parameters:
            +
            rowIterator - the iterator of rows from a changelog table
            +
            rowType - the schema of the rows
            +
            Returns:
            +
            a new iterator instance
          - + + + +
            +
          • +

            removeNetCarryovers

            +
            public static java.util.Iterator<org.apache.spark.sql.Row> removeNetCarryovers(java.util.Iterator<org.apache.spark.sql.Row> rowIterator,
            +                                                                               org.apache.spark.sql.types.StructType rowType)
            +
          • +
          + + + +
            +
          • +

            isSameRecord

            +
            protected boolean isSameRecord(org.apache.spark.sql.Row currentRow,
            +                               org.apache.spark.sql.Row nextRow,
            +                               int[] indicesToIdentifySameRow)
            +
          • +
          + + + +
            +
          • +

            isDifferentValue

            +
            protected boolean isDifferentValue(org.apache.spark.sql.Row currentRow,
            +                                   org.apache.spark.sql.Row nextRow,
            +                                   int idx)
            +
          • +
          +
          • -

            next

            -
            public org.apache.spark.sql.Row next()
            -
            -
            Specified by:
            -
            next in interface java.util.Iterator<org.apache.spark.sql.Row>
            -
            +

            generateIndicesToIdentifySameRow

            +
            protected static int[] generateIndicesToIdentifySameRow(int totalColumnCount,
            +                                                        java.util.Set<java.lang.Integer> metadataColumnIndices)
        • @@ -302,14 +504,14 @@

          next

      diff --git a/javadoc/org/apache/iceberg/spark/CommitMetadata.html b/javadoc/org/apache/iceberg/spark/CommitMetadata.html index b69e3606e..7557e4fd9 100644 --- a/javadoc/org/apache/iceberg/spark/CommitMetadata.html +++ b/javadoc/org/apache/iceberg/spark/CommitMetadata.html @@ -48,7 +48,7 @@
      Parameters:
      -
      properties - extra commit metadata to attach to the snapshot committed within callable
      +
      properties - extra commit metadata to attach to the snapshot committed within callable. + The prefix will be removed for properties starting with SnapshotSummary.EXTRA_METADATA_PREFIX
      callable - the code to be executed
      exClass - the expected type of exception which would be thrown from callable
      Throws:
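A hedged usage sketch for withCommitProperties, assuming the callable wraps the write whose snapshot should carry the extra summary properties; the property key uses the documented SnapshotSummary.EXTRA_METADATA_PREFIX so the prefix is stripped as described above:

    import java.util.Map;
    import org.apache.iceberg.SnapshotSummary;
    import org.apache.iceberg.spark.CommitMetadata;
    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;

    class CommitMetadataExample {
      // Attaches "source-job" = "nightly-etl" to the snapshot summary of the
      // commit produced inside the callable.
      static void appendWithMetadata(Dataset<Row> df, String table) {
        Map<String, String> properties =
            Map.of(SnapshotSummary.EXTRA_METADATA_PREFIX + "source-job", "nightly-etl");
        CommitMetadata.withCommitProperties(
            properties,
            () -> {
              df.writeTo(table).append();
              return 0;
            },
            RuntimeException.class);
      }
    }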
      @@ -222,7 +223,7 @@

      commitProperties

    @@ -224,7 +231,7 @@

    parseSortOrder

    Called to persist the output of a rewrite action for a specific group. Since the write is done via a Spark Datasource, we have to propagate the result through this side-effect call.
    Parameters:
    table - table where the rewrite is occurring
    -
    fileSetID - the id used to identify the source set of files being rewritten
    -
    newDataFiles - the new files which have been written
    +
    fileSetId - the id used to identify the source set of files being rewritten
    +
    newFiles - the new files which have been written
    - +
    • -

      fetchNewDataFiles

      -
      public java.util.Set<DataFile> fetchNewDataFiles(Table table,
      -                                                 java.lang.String fileSetID)
      +

      fetchNewFiles

      +
      public java.util.Set<F> fetchNewFiles(Table table,
      +                                      java.lang.String fileSetId)
    @@ -220,16 +220,16 @@

    fetchNewDataFiles

  • clearRewrite

    public void clearRewrite(Table table,
    -                         java.lang.String fileSetID)
    + java.lang.String fileSetId)
  • - +
    • -

      fetchSetIDs

      -
      public java.util.Set<java.lang.String> fetchSetIDs(Table table)
      +

      fetchSetIds

      +
      public java.util.Set<java.lang.String> fetchSetIds(Table table)
    @@ -260,7 +260,7 @@

    fetchSetIDs

    @@ -174,14 +174,18 @@

    Method Summary

    capabilities()  +org.apache.spark.sql.connector.catalog.MetadataColumn[] +metadataColumns()  + + java.lang.String name()  - + org.apache.spark.sql.connector.read.ScanBuilder newScanBuilder(org.apache.spark.sql.util.CaseInsensitiveStringMap options)  - + org.apache.spark.sql.types.StructType schema()  @@ -194,11 +198,18 @@

    Methods inherited from class java.lang.Object

    clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      +
    • + + +

      Methods inherited from interface org.apache.spark.sql.connector.catalog.SupportsMetadataColumns

      +canRenameConflictingMetadataColumns
    • +
    +
    • Methods inherited from interface org.apache.spark.sql.connector.catalog.Table

      -partitioning, properties
    • +columns, partitioning, properties
    @@ -295,7 +306,7 @@

    capabilities

    -
      +
      • newScanBuilder

        public org.apache.spark.sql.connector.read.ScanBuilder newScanBuilder(org.apache.spark.sql.util.CaseInsensitiveStringMap options)
        @@ -305,6 +316,19 @@

        newScanBuilder

      + + + +
        +
      • +

        metadataColumns

        +
        public org.apache.spark.sql.connector.catalog.MetadataColumn[] metadataColumns()
        +
        +
        Specified by:
        +
        metadataColumns in interface org.apache.spark.sql.connector.catalog.SupportsMetadataColumns
        +
        +
      • +
    diff --git a/javadoc/org/apache/iceberg/spark/source/SparkMicroBatchStream.html b/javadoc/org/apache/iceberg/spark/source/SparkMicroBatchStream.html index 3a0b67680..d6fac3e99 100644 --- a/javadoc/org/apache/iceberg/spark/source/SparkMicroBatchStream.html +++ b/javadoc/org/apache/iceberg/spark/source/SparkMicroBatchStream.html @@ -17,7 +17,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -107,13 +107,13 @@

Class SparkMicroBatchStream
  • All Implemented Interfaces:
    -
    org.apache.spark.sql.connector.read.streaming.MicroBatchStream, org.apache.spark.sql.connector.read.streaming.SparkDataStream
    +
    org.apache.spark.sql.connector.read.streaming.MicroBatchStream, org.apache.spark.sql.connector.read.streaming.SparkDataStream, org.apache.spark.sql.connector.read.streaming.SupportsAdmissionControl


    public class SparkMicroBatchStream
     extends java.lang.Object
    -implements org.apache.spark.sql.connector.read.streaming.MicroBatchStream
    +implements org.apache.spark.sql.connector.read.streaming.MicroBatchStream, org.apache.spark.sql.connector.read.streaming.SupportsAdmissionControl
  • @@ -145,19 +145,28 @@

    Method Summary

    deserializeOffset(java.lang.String json)  +org.apache.spark.sql.connector.read.streaming.ReadLimit +getDefaultReadLimit()  + + org.apache.spark.sql.connector.read.streaming.Offset initialOffset()  - + org.apache.spark.sql.connector.read.streaming.Offset latestOffset()  - + +org.apache.spark.sql.connector.read.streaming.Offset +latestOffset(org.apache.spark.sql.connector.read.streaming.Offset startOffset, + org.apache.spark.sql.connector.read.streaming.ReadLimit limit)  + + org.apache.spark.sql.connector.read.InputPartition[] planInputPartitions(org.apache.spark.sql.connector.read.streaming.Offset start, org.apache.spark.sql.connector.read.streaming.Offset end)  - + void stop()  @@ -169,6 +178,13 @@

    Method Summary

    Methods inherited from class java.lang.Object

    clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait +
      +
    • + + +

      Methods inherited from interface org.apache.spark.sql.connector.read.streaming.SupportsAdmissionControl

      +reportLatestOffset
    • +
    @@ -265,7 +281,7 @@

    commit

    -
      +
      • stop

        public void stop()
        @@ -275,6 +291,33 @@

        stop

      + + + +
        +
      • +

        latestOffset

        +
        public org.apache.spark.sql.connector.read.streaming.Offset latestOffset(org.apache.spark.sql.connector.read.streaming.Offset startOffset,
        +                                                                         org.apache.spark.sql.connector.read.streaming.ReadLimit limit)
        +
        +
        Specified by:
        +
        latestOffset in interface org.apache.spark.sql.connector.read.streaming.SupportsAdmissionControl
        +
        +
      • +
      + + + +
        +
      • +

        getDefaultReadLimit

        +
        public org.apache.spark.sql.connector.read.streaming.ReadLimit getDefaultReadLimit()
        +
        +
        Specified by:
        +
        getDefaultReadLimit in interface org.apache.spark.sql.connector.read.streaming.SupportsAdmissionControl
        +
        +
      • +
    diff --git a/javadoc/org/apache/iceberg/spark/source/SparkPartitionedWriter.html b/javadoc/org/apache/iceberg/spark/source/SparkPartitionedWriter.html index d1dc2e21b..fb67cdfb5 100644 --- a/javadoc/org/apache/iceberg/spark/source/SparkPartitionedWriter.html +++ b/javadoc/org/apache/iceberg/spark/source/SparkPartitionedWriter.html @@ -48,7 +48,7 @@