diff --git a/provider/cmd/pulumi-resource-digitalocean/bridge-metadata.json b/provider/cmd/pulumi-resource-digitalocean/bridge-metadata.json index 852797f1..d904b9cc 100644 --- a/provider/cmd/pulumi-resource-digitalocean/bridge-metadata.json +++ b/provider/cmd/pulumi-resource-digitalocean/bridge-metadata.json @@ -482,6 +482,10 @@ } } }, + "digitalocean_database_kafka_config": { + "current": "digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig", + "majorVersion": 4 + }, "digitalocean_database_kafka_topic": { "current": "digitalocean:index/databaseKafkaTopic:DatabaseKafkaTopic", "majorVersion": 4, @@ -491,6 +495,10 @@ } } }, + "digitalocean_database_mongodb_config": { + "current": "digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig", + "majorVersion": 4 + }, "digitalocean_database_mysql_config": { "current": "digitalocean:index/databaseMysqlConfig:DatabaseMysqlConfig", "majorVersion": 4 @@ -537,6 +545,9 @@ "fields": { "acl": { "maxItemsOne": false + }, + "opensearch_acl": { + "maxItemsOne": false } } } diff --git a/provider/cmd/pulumi-resource-digitalocean/schema.json b/provider/cmd/pulumi-resource-digitalocean/schema.json index 6dc1c4d6..e8b58304 100644 --- a/provider/cmd/pulumi-resource-digitalocean/schema.json +++ b/provider/cmd/pulumi-resource-digitalocean/schema.json @@ -2621,6 +2621,12 @@ "$ref": "#/types/digitalocean:index/DatabaseUserSettingAcl:DatabaseUserSettingAcl" }, "description": "A set of ACLs (Access Control Lists) specifying permission on topics with a Kafka cluster. The properties of an individual ACL are described below:\n\nAn individual ACL includes the following:\n" + }, + "opensearchAcls": { + "type": "array", + "items": { + "$ref": "#/types/digitalocean:index/DatabaseUserSettingOpensearchAcl:DatabaseUserSettingOpensearchAcl" + } } }, "type": "object" @@ -2655,6 +2661,22 @@ } } }, + "digitalocean:index/DatabaseUserSettingOpensearchAcl:DatabaseUserSettingOpensearchAcl": { + "properties": { + "index": { + "type": "string" + }, + "permission": { + "type": "string", + "description": "The permission level applied to the ACL. This includes \"admin\", \"consume\", \"produce\", and \"produceconsume\". \"admin\" allows for producing and consuming as well as add/delete/update permission for topics. \"consume\" allows only for reading topic messages. \"produce\" allows only for writing topic messages. \"produceconsume\" allows for both reading and writing topic messages.\n" + } + }, + "type": "object", + "required": [ + "index", + "permission" + ] + }, "digitalocean:index/FirewallInboundRule:FirewallInboundRule": { "properties": { "portRange": { @@ -9645,6 +9667,260 @@ "type": "object" } }, + "digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig": { + "description": "Provides a virtual resource that can be used to change advanced configuration\noptions for a DigitalOcean managed Kafka database cluster.\n\n\u003e **Note** Kafka configurations are only removed from state when destroyed. 
The remote configuration is not unset.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as digitalocean from \"@pulumi/digitalocean\";\n\nconst exampleDatabaseCluster = new digitalocean.DatabaseCluster(\"example\", {\n name: \"example-kafka-cluster\",\n engine: \"kafka\",\n version: \"3.7\",\n size: digitalocean.DatabaseSlug.DB_1VPCU1GB,\n region: digitalocean.Region.NYC3,\n nodeCount: 3,\n});\nconst example = new digitalocean.DatabaseKafkaConfig(\"example\", {\n clusterId: exampleDatabaseCluster.id,\n groupInitialRebalanceDelayMs: 3000,\n groupMinSessionTimeoutMs: 6000,\n groupMaxSessionTimeoutMs: 1800000,\n messageMaxBytes: 1048588,\n logCleanerDeleteRetentionMs: 86400000,\n logCleanerMinCompactionLagMs: \"0\",\n logFlushIntervalMs: \"9223372036854775807\",\n logIndexIntervalBytes: 4096,\n logMessageDownconversionEnable: true,\n logMessageTimestampDifferenceMaxMs: \"9223372036854775807\",\n logPreallocate: false,\n logRetentionBytes: \"-1\",\n logRetentionHours: 168,\n logRetentionMs: \"604800000\",\n logRollJitterMs: \"0\",\n logSegmentDeleteDelayMs: 60000,\n autoCreateTopicsEnable: true,\n});\n```\n```python\nimport pulumi\nimport pulumi_digitalocean as digitalocean\n\nexample_database_cluster = digitalocean.DatabaseCluster(\"example\",\n name=\"example-kafka-cluster\",\n engine=\"kafka\",\n version=\"3.7\",\n size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB,\n region=digitalocean.Region.NYC3,\n node_count=3)\nexample = digitalocean.DatabaseKafkaConfig(\"example\",\n cluster_id=example_database_cluster.id,\n group_initial_rebalance_delay_ms=3000,\n group_min_session_timeout_ms=6000,\n group_max_session_timeout_ms=1800000,\n message_max_bytes=1048588,\n log_cleaner_delete_retention_ms=86400000,\n log_cleaner_min_compaction_lag_ms=\"0\",\n log_flush_interval_ms=\"9223372036854775807\",\n log_index_interval_bytes=4096,\n log_message_downconversion_enable=True,\n log_message_timestamp_difference_max_ms=\"9223372036854775807\",\n log_preallocate=False,\n log_retention_bytes=\"-1\",\n log_retention_hours=168,\n log_retention_ms=\"604800000\",\n log_roll_jitter_ms=\"0\",\n log_segment_delete_delay_ms=60000,\n auto_create_topics_enable=True)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing DigitalOcean = Pulumi.DigitalOcean;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleDatabaseCluster = new DigitalOcean.DatabaseCluster(\"example\", new()\n {\n Name = \"example-kafka-cluster\",\n Engine = \"kafka\",\n Version = \"3.7\",\n Size = DigitalOcean.DatabaseSlug.DB_1VPCU1GB,\n Region = DigitalOcean.Region.NYC3,\n NodeCount = 3,\n });\n\n var example = new DigitalOcean.DatabaseKafkaConfig(\"example\", new()\n {\n ClusterId = exampleDatabaseCluster.Id,\n GroupInitialRebalanceDelayMs = 3000,\n GroupMinSessionTimeoutMs = 6000,\n GroupMaxSessionTimeoutMs = 1800000,\n MessageMaxBytes = 1048588,\n LogCleanerDeleteRetentionMs = 86400000,\n LogCleanerMinCompactionLagMs = \"0\",\n LogFlushIntervalMs = \"9223372036854775807\",\n LogIndexIntervalBytes = 4096,\n LogMessageDownconversionEnable = true,\n LogMessageTimestampDifferenceMaxMs = \"9223372036854775807\",\n LogPreallocate = false,\n LogRetentionBytes = \"-1\",\n LogRetentionHours = 168,\n LogRetentionMs = \"604800000\",\n LogRollJitterMs = \"0\",\n LogSegmentDeleteDelayMs = 60000,\n AutoCreateTopicsEnable = true,\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleDatabaseCluster, err := digitalocean.NewDatabaseCluster(ctx, \"example\", \u0026digitalocean.DatabaseClusterArgs{\n\t\t\tName: pulumi.String(\"example-kafka-cluster\"),\n\t\t\tEngine: pulumi.String(\"kafka\"),\n\t\t\tVersion: pulumi.String(\"3.7\"),\n\t\t\tSize: pulumi.String(digitalocean.DatabaseSlug_DB_1VPCU1GB),\n\t\t\tRegion: pulumi.String(digitalocean.RegionNYC3),\n\t\t\tNodeCount: pulumi.Int(3),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = digitalocean.NewDatabaseKafkaConfig(ctx, \"example\", \u0026digitalocean.DatabaseKafkaConfigArgs{\n\t\t\tClusterId: exampleDatabaseCluster.ID(),\n\t\t\tGroupInitialRebalanceDelayMs: pulumi.Int(3000),\n\t\t\tGroupMinSessionTimeoutMs: pulumi.Int(6000),\n\t\t\tGroupMaxSessionTimeoutMs: pulumi.Int(1800000),\n\t\t\tMessageMaxBytes: pulumi.Int(1048588),\n\t\t\tLogCleanerDeleteRetentionMs: pulumi.Int(86400000),\n\t\t\tLogCleanerMinCompactionLagMs: pulumi.String(\"0\"),\n\t\t\tLogFlushIntervalMs: pulumi.String(\"9223372036854775807\"),\n\t\t\tLogIndexIntervalBytes: pulumi.Int(4096),\n\t\t\tLogMessageDownconversionEnable: pulumi.Bool(true),\n\t\t\tLogMessageTimestampDifferenceMaxMs: pulumi.String(\"9223372036854775807\"),\n\t\t\tLogPreallocate: pulumi.Bool(false),\n\t\t\tLogRetentionBytes: pulumi.String(\"-1\"),\n\t\t\tLogRetentionHours: pulumi.Int(168),\n\t\t\tLogRetentionMs: pulumi.String(\"604800000\"),\n\t\t\tLogRollJitterMs: pulumi.String(\"0\"),\n\t\t\tLogSegmentDeleteDelayMs: pulumi.Int(60000),\n\t\t\tAutoCreateTopicsEnable: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```yaml\nresources:\n example:\n type: digitalocean:DatabaseKafkaConfig\n properties:\n clusterId: ${exampleDatabaseCluster.id}\n groupInitialRebalanceDelayMs: 3000\n groupMinSessionTimeoutMs: 6000\n groupMaxSessionTimeoutMs: 1.8e+06\n messageMaxBytes: 1.048588e+06\n logCleanerDeleteRetentionMs: 8.64e+07\n logCleanerMinCompactionLagMs: 0\n logFlushIntervalMs: 9.223372036854776e+18\n logIndexIntervalBytes: 4096\n logMessageDownconversionEnable: true\n logMessageTimestampDifferenceMaxMs: 9.223372036854776e+18\n logPreallocate: false\n logRetentionBytes: -1\n logRetentionHours: 168\n logRetentionMs: 6.048e+08\n logRollJitterMs: 0\n logSegmentDeleteDelayMs: 60000\n autoCreateTopicsEnable: true\n exampleDatabaseCluster:\n type: digitalocean:DatabaseCluster\n name: example\n properties:\n name: example-kafka-cluster\n engine: kafka\n version: '3.7'\n size: db-s-1vcpu-1gb\n region: nyc3\n nodeCount: 3\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nA Kafka database cluster's configuration can be imported using the `id` the parent cluster, e.g.\n\n```sh\n$ pulumi import digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig example 4b62829a-9c42-465b-aaa3-84051048e712\n```\n\n", + "properties": { + "autoCreateTopicsEnable": { + "type": "boolean", + "description": "Enable auto creation of topics.\n" + }, + "clusterId": { + "type": "string", + "description": "The ID of the target Kafka cluster.\n" + }, + "groupInitialRebalanceDelayMs": { + "type": "integer", + "description": "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. 
A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.\n" + }, + "groupMaxSessionTimeoutMs": { + "type": "integer", + "description": "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.\n" + }, + "groupMinSessionTimeoutMs": { + "type": "integer", + "description": "The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.\n" + }, + "logCleanerDeleteRetentionMs": { + "type": "integer", + "description": "How long are delete records retained?\n" + }, + "logCleanerMinCompactionLagMs": { + "type": "string", + "description": "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.\n" + }, + "logFlushIntervalMs": { + "type": "string", + "description": "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.\n" + }, + "logIndexIntervalBytes": { + "type": "integer", + "description": "The interval with which Kafka adds an entry to the offset index.\n" + }, + "logMessageDownconversionEnable": { + "type": "boolean", + "description": "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.\n" + }, + "logMessageTimestampDifferenceMaxMs": { + "type": "string", + "description": "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.\n" + }, + "logPreallocate": { + "type": "boolean", + "description": "Controls whether to preallocate a file when creating a new segment.\n" + }, + "logRetentionBytes": { + "type": "string", + "description": "The maximum size of the log before deleting messages.\n" + }, + "logRetentionHours": { + "type": "integer", + "description": "The number of hours to keep a log file before deleting it.\n" + }, + "logRetentionMs": { + "type": "string", + "description": "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.\n" + }, + "logRollJitterMs": { + "type": "string", + "description": "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). 
If not set, the value in log.roll.jitter.hours is used.\n" + }, + "logSegmentDeleteDelayMs": { + "type": "integer", + "description": "The amount of time to wait before deleting a file from the filesystem.\n" + }, + "messageMaxBytes": { + "type": "integer", + "description": "The maximum size of message that the server can receive.\n" + } + }, + "required": [ + "autoCreateTopicsEnable", + "clusterId", + "groupInitialRebalanceDelayMs", + "groupMaxSessionTimeoutMs", + "groupMinSessionTimeoutMs", + "logCleanerDeleteRetentionMs", + "logCleanerMinCompactionLagMs", + "logFlushIntervalMs", + "logIndexIntervalBytes", + "logMessageDownconversionEnable", + "logMessageTimestampDifferenceMaxMs", + "logPreallocate", + "logRetentionBytes", + "logRetentionHours", + "logRetentionMs", + "logRollJitterMs", + "logSegmentDeleteDelayMs", + "messageMaxBytes" + ], + "inputProperties": { + "autoCreateTopicsEnable": { + "type": "boolean", + "description": "Enable auto creation of topics.\n" + }, + "clusterId": { + "type": "string", + "description": "The ID of the target Kafka cluster.\n", + "willReplaceOnChanges": true + }, + "groupInitialRebalanceDelayMs": { + "type": "integer", + "description": "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.\n" + }, + "groupMaxSessionTimeoutMs": { + "type": "integer", + "description": "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.\n" + }, + "groupMinSessionTimeoutMs": { + "type": "integer", + "description": "The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.\n" + }, + "logCleanerDeleteRetentionMs": { + "type": "integer", + "description": "How long are delete records retained?\n" + }, + "logCleanerMinCompactionLagMs": { + "type": "string", + "description": "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.\n" + }, + "logFlushIntervalMs": { + "type": "string", + "description": "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. 
If not set, the value in log.flush.scheduler.interval.ms is used.\n" + }, + "logIndexIntervalBytes": { + "type": "integer", + "description": "The interval with which Kafka adds an entry to the offset index.\n" + }, + "logMessageDownconversionEnable": { + "type": "boolean", + "description": "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.\n" + }, + "logMessageTimestampDifferenceMaxMs": { + "type": "string", + "description": "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.\n" + }, + "logPreallocate": { + "type": "boolean", + "description": "Controls whether to preallocate a file when creating a new segment.\n" + }, + "logRetentionBytes": { + "type": "string", + "description": "The maximum size of the log before deleting messages.\n" + }, + "logRetentionHours": { + "type": "integer", + "description": "The number of hours to keep a log file before deleting it.\n" + }, + "logRetentionMs": { + "type": "string", + "description": "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.\n" + }, + "logRollJitterMs": { + "type": "string", + "description": "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.\n" + }, + "logSegmentDeleteDelayMs": { + "type": "integer", + "description": "The amount of time to wait before deleting a file from the filesystem.\n" + }, + "messageMaxBytes": { + "type": "integer", + "description": "The maximum size of message that the server can receive.\n" + } + }, + "requiredInputs": [ + "clusterId" + ], + "stateInputs": { + "description": "Input properties used for looking up and filtering DatabaseKafkaConfig resources.\n", + "properties": { + "autoCreateTopicsEnable": { + "type": "boolean", + "description": "Enable auto creation of topics.\n" + }, + "clusterId": { + "type": "string", + "description": "The ID of the target Kafka cluster.\n", + "willReplaceOnChanges": true + }, + "groupInitialRebalanceDelayMs": { + "type": "integer", + "description": "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.\n" + }, + "groupMaxSessionTimeoutMs": { + "type": "integer", + "description": "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.\n" + }, + "groupMinSessionTimeoutMs": { + "type": "integer", + "description": "The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.\n" + }, + "logCleanerDeleteRetentionMs": { + "type": "integer", + "description": "How long are delete records retained?\n" + }, + "logCleanerMinCompactionLagMs": { + "type": "string", + "description": "The minimum time a message will remain uncompacted in the log. 
Only applicable for logs that are being compacted.\n" + }, + "logFlushIntervalMs": { + "type": "string", + "description": "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.\n" + }, + "logIndexIntervalBytes": { + "type": "integer", + "description": "The interval with which Kafka adds an entry to the offset index.\n" + }, + "logMessageDownconversionEnable": { + "type": "boolean", + "description": "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.\n" + }, + "logMessageTimestampDifferenceMaxMs": { + "type": "string", + "description": "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.\n" + }, + "logPreallocate": { + "type": "boolean", + "description": "Controls whether to preallocate a file when creating a new segment.\n" + }, + "logRetentionBytes": { + "type": "string", + "description": "The maximum size of the log before deleting messages.\n" + }, + "logRetentionHours": { + "type": "integer", + "description": "The number of hours to keep a log file before deleting it.\n" + }, + "logRetentionMs": { + "type": "string", + "description": "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.\n" + }, + "logRollJitterMs": { + "type": "string", + "description": "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.\n" + }, + "logSegmentDeleteDelayMs": { + "type": "integer", + "description": "The amount of time to wait before deleting a file from the filesystem.\n" + }, + "messageMaxBytes": { + "type": "integer", + "description": "The maximum size of message that the server can receive.\n" + } + }, + "type": "object" + } + }, "digitalocean:index/databaseKafkaTopic:DatabaseKafkaTopic": { "description": "Provides a DigitalOcean Kafka topic for Kafka clusters.\n\n## Example Usage\n\n### Create a new Kafka topic\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as digitalocean from \"@pulumi/digitalocean\";\n\nconst kafka_example = new digitalocean.DatabaseCluster(\"kafka-example\", {\n name: \"example-kafka-cluster\",\n engine: \"kafka\",\n version: \"3.5\",\n size: \"db-s-2vcpu-2gb\",\n region: digitalocean.Region.NYC1,\n nodeCount: 3,\n tags: [\"production\"],\n});\nconst topic_01 = new digitalocean.DatabaseKafkaTopic(\"topic-01\", {\n clusterId: kafka_example.id,\n name: \"topic-01\",\n partitionCount: 3,\n replicationFactor: 2,\n configs: [{\n cleanupPolicy: \"compact\",\n compressionType: \"uncompressed\",\n deleteRetentionMs: \"14000\",\n fileDeleteDelayMs: \"170000\",\n flushMessages: \"92233\",\n flushMs: \"92233720368\",\n indexIntervalBytes: \"40962\",\n maxCompactionLagMs: \"9223372036854775807\",\n maxMessageBytes: \"1048588\",\n messageDownConversionEnable: true,\n messageFormatVersion: \"3.0-IV1\",\n messageTimestampDifferenceMaxMs: \"9223372036854775807\",\n messageTimestampType: \"log_append_time\",\n minCleanableDirtyRatio: 0.5,\n minCompactionLagMs: \"20000\",\n minInsyncReplicas: 2,\n preallocate: false,\n retentionBytes: \"-1\",\n retentionMs: \"-1\",\n segmentBytes: \"209715200\",\n segmentIndexBytes: \"10485760\",\n segmentJitterMs: \"0\",\n segmentMs: \"604800000\",\n 
}],\n});\n```\n```python\nimport pulumi\nimport pulumi_digitalocean as digitalocean\n\nkafka_example = digitalocean.DatabaseCluster(\"kafka-example\",\n name=\"example-kafka-cluster\",\n engine=\"kafka\",\n version=\"3.5\",\n size=\"db-s-2vcpu-2gb\",\n region=digitalocean.Region.NYC1,\n node_count=3,\n tags=[\"production\"])\ntopic_01 = digitalocean.DatabaseKafkaTopic(\"topic-01\",\n cluster_id=kafka_example.id,\n name=\"topic-01\",\n partition_count=3,\n replication_factor=2,\n configs=[{\n \"cleanup_policy\": \"compact\",\n \"compression_type\": \"uncompressed\",\n \"delete_retention_ms\": \"14000\",\n \"file_delete_delay_ms\": \"170000\",\n \"flush_messages\": \"92233\",\n \"flush_ms\": \"92233720368\",\n \"index_interval_bytes\": \"40962\",\n \"max_compaction_lag_ms\": \"9223372036854775807\",\n \"max_message_bytes\": \"1048588\",\n \"message_down_conversion_enable\": True,\n \"message_format_version\": \"3.0-IV1\",\n \"message_timestamp_difference_max_ms\": \"9223372036854775807\",\n \"message_timestamp_type\": \"log_append_time\",\n \"min_cleanable_dirty_ratio\": 0.5,\n \"min_compaction_lag_ms\": \"20000\",\n \"min_insync_replicas\": 2,\n \"preallocate\": False,\n \"retention_bytes\": \"-1\",\n \"retention_ms\": \"-1\",\n \"segment_bytes\": \"209715200\",\n \"segment_index_bytes\": \"10485760\",\n \"segment_jitter_ms\": \"0\",\n \"segment_ms\": \"604800000\",\n }])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing DigitalOcean = Pulumi.DigitalOcean;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var kafka_example = new DigitalOcean.DatabaseCluster(\"kafka-example\", new()\n {\n Name = \"example-kafka-cluster\",\n Engine = \"kafka\",\n Version = \"3.5\",\n Size = \"db-s-2vcpu-2gb\",\n Region = DigitalOcean.Region.NYC1,\n NodeCount = 3,\n Tags = new[]\n {\n \"production\",\n },\n });\n\n var topic_01 = new DigitalOcean.DatabaseKafkaTopic(\"topic-01\", new()\n {\n ClusterId = kafka_example.Id,\n Name = \"topic-01\",\n PartitionCount = 3,\n ReplicationFactor = 2,\n Configs = new[]\n {\n new DigitalOcean.Inputs.DatabaseKafkaTopicConfigArgs\n {\n CleanupPolicy = \"compact\",\n CompressionType = \"uncompressed\",\n DeleteRetentionMs = \"14000\",\n FileDeleteDelayMs = \"170000\",\n FlushMessages = \"92233\",\n FlushMs = \"92233720368\",\n IndexIntervalBytes = \"40962\",\n MaxCompactionLagMs = \"9223372036854775807\",\n MaxMessageBytes = \"1048588\",\n MessageDownConversionEnable = true,\n MessageFormatVersion = \"3.0-IV1\",\n MessageTimestampDifferenceMaxMs = \"9223372036854775807\",\n MessageTimestampType = \"log_append_time\",\n MinCleanableDirtyRatio = 0.5,\n MinCompactionLagMs = \"20000\",\n MinInsyncReplicas = 2,\n Preallocate = false,\n RetentionBytes = \"-1\",\n RetentionMs = \"-1\",\n SegmentBytes = \"209715200\",\n SegmentIndexBytes = \"10485760\",\n SegmentJitterMs = \"0\",\n SegmentMs = \"604800000\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := digitalocean.NewDatabaseCluster(ctx, \"kafka-example\", \u0026digitalocean.DatabaseClusterArgs{\n\t\t\tName: pulumi.String(\"example-kafka-cluster\"),\n\t\t\tEngine: pulumi.String(\"kafka\"),\n\t\t\tVersion: pulumi.String(\"3.5\"),\n\t\t\tSize: pulumi.String(\"db-s-2vcpu-2gb\"),\n\t\t\tRegion: pulumi.String(digitalocean.RegionNYC1),\n\t\t\tNodeCount: pulumi.Int(3),\n\t\t\tTags: 
pulumi.StringArray{\n\t\t\t\tpulumi.String(\"production\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = digitalocean.NewDatabaseKafkaTopic(ctx, \"topic-01\", \u0026digitalocean.DatabaseKafkaTopicArgs{\n\t\t\tClusterId: kafka_example.ID(),\n\t\t\tName: pulumi.String(\"topic-01\"),\n\t\t\tPartitionCount: pulumi.Int(3),\n\t\t\tReplicationFactor: pulumi.Int(2),\n\t\t\tConfigs: digitalocean.DatabaseKafkaTopicConfigArray{\n\t\t\t\t\u0026digitalocean.DatabaseKafkaTopicConfigArgs{\n\t\t\t\t\tCleanupPolicy: pulumi.String(\"compact\"),\n\t\t\t\t\tCompressionType: pulumi.String(\"uncompressed\"),\n\t\t\t\t\tDeleteRetentionMs: pulumi.String(\"14000\"),\n\t\t\t\t\tFileDeleteDelayMs: pulumi.String(\"170000\"),\n\t\t\t\t\tFlushMessages: pulumi.String(\"92233\"),\n\t\t\t\t\tFlushMs: pulumi.String(\"92233720368\"),\n\t\t\t\t\tIndexIntervalBytes: pulumi.String(\"40962\"),\n\t\t\t\t\tMaxCompactionLagMs: pulumi.String(\"9223372036854775807\"),\n\t\t\t\t\tMaxMessageBytes: pulumi.String(\"1048588\"),\n\t\t\t\t\tMessageDownConversionEnable: pulumi.Bool(true),\n\t\t\t\t\tMessageFormatVersion: pulumi.String(\"3.0-IV1\"),\n\t\t\t\t\tMessageTimestampDifferenceMaxMs: pulumi.String(\"9223372036854775807\"),\n\t\t\t\t\tMessageTimestampType: pulumi.String(\"log_append_time\"),\n\t\t\t\t\tMinCleanableDirtyRatio: pulumi.Float64(0.5),\n\t\t\t\t\tMinCompactionLagMs: pulumi.String(\"20000\"),\n\t\t\t\t\tMinInsyncReplicas: pulumi.Int(2),\n\t\t\t\t\tPreallocate: pulumi.Bool(false),\n\t\t\t\t\tRetentionBytes: pulumi.String(\"-1\"),\n\t\t\t\t\tRetentionMs: pulumi.String(\"-1\"),\n\t\t\t\t\tSegmentBytes: pulumi.String(\"209715200\"),\n\t\t\t\t\tSegmentIndexBytes: pulumi.String(\"10485760\"),\n\t\t\t\t\tSegmentJitterMs: pulumi.String(\"0\"),\n\t\t\t\t\tSegmentMs: pulumi.String(\"604800000\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```yaml\nresources:\n topic-01:\n type: digitalocean:DatabaseKafkaTopic\n properties:\n clusterId: ${[\"kafka-example\"].id}\n name: topic-01\n partitionCount: 3\n replicationFactor: 2\n configs:\n - cleanupPolicy: compact\n compressionType: uncompressed\n deleteRetentionMs: 14000\n fileDeleteDelayMs: 170000\n flushMessages: 92233\n flushMs: 9.2233720368e+10\n indexIntervalBytes: 40962\n maxCompactionLagMs: 9.223372036854776e+18\n maxMessageBytes: 1.048588e+06\n messageDownConversionEnable: true\n messageFormatVersion: 3.0-IV1\n messageTimestampDifferenceMaxMs: 9.223372036854776e+18\n messageTimestampType: log_append_time\n minCleanableDirtyRatio: 0.5\n minCompactionLagMs: 20000\n minInsyncReplicas: 2\n preallocate: false\n retentionBytes: -1\n retentionMs: -1\n segmentBytes: 2.097152e+08\n segmentIndexBytes: 1.048576e+07\n segmentJitterMs: 0\n segmentMs: 6.048e+08\n kafka-example:\n type: digitalocean:DatabaseCluster\n properties:\n name: example-kafka-cluster\n engine: kafka\n version: '3.5'\n size: db-s-2vcpu-2gb\n region: nyc1\n nodeCount: 3\n tags:\n - production\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nTopics can be imported using the `id` of the source cluster and the `name` of the topic joined with a comma. 
For example:\n\n```sh\n$ pulumi import digitalocean:index/databaseKafkaTopic:DatabaseKafkaTopic topic-01 245bcfd0-7f31-4ce6-a2bc-475a116cca97,topic-01\n```\n\n", "properties": { @@ -9748,6 +10024,104 @@ "type": "object" } }, + "digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig": { + "description": "Provides a virtual resource that can be used to change advanced configuration\noptions for a DigitalOcean managed MongoDB database cluster.\n\n\u003e **Note** MongoDB configurations are only removed from state when destroyed. The remote configuration is not unset.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as digitalocean from \"@pulumi/digitalocean\";\n\nconst exampleDatabaseCluster = new digitalocean.DatabaseCluster(\"example\", {\n name: \"example-mongodb-cluster\",\n engine: \"mongodb\",\n version: \"7\",\n size: digitalocean.DatabaseSlug.DB_1VPCU1GB,\n region: digitalocean.Region.NYC3,\n nodeCount: 1,\n});\nconst example = new digitalocean.DatabaseMongodbConfig(\"example\", {\n clusterId: exampleDatabaseCluster.id,\n defaultReadConcern: \"majority\",\n defaultWriteConcern: \"majority\",\n transactionLifetimeLimitSeconds: 100,\n slowOpThresholdMs: 100,\n verbosity: 3,\n});\n```\n```python\nimport pulumi\nimport pulumi_digitalocean as digitalocean\n\nexample_database_cluster = digitalocean.DatabaseCluster(\"example\",\n name=\"example-mongodb-cluster\",\n engine=\"mongodb\",\n version=\"7\",\n size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB,\n region=digitalocean.Region.NYC3,\n node_count=1)\nexample = digitalocean.DatabaseMongodbConfig(\"example\",\n cluster_id=example_database_cluster.id,\n default_read_concern=\"majority\",\n default_write_concern=\"majority\",\n transaction_lifetime_limit_seconds=100,\n slow_op_threshold_ms=100,\n verbosity=3)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing DigitalOcean = Pulumi.DigitalOcean;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleDatabaseCluster = new DigitalOcean.DatabaseCluster(\"example\", new()\n {\n Name = \"example-mongodb-cluster\",\n Engine = \"mongodb\",\n Version = \"7\",\n Size = DigitalOcean.DatabaseSlug.DB_1VPCU1GB,\n Region = DigitalOcean.Region.NYC3,\n NodeCount = 1,\n });\n\n var example = new DigitalOcean.DatabaseMongodbConfig(\"example\", new()\n {\n ClusterId = exampleDatabaseCluster.Id,\n DefaultReadConcern = \"majority\",\n DefaultWriteConcern = \"majority\",\n TransactionLifetimeLimitSeconds = 100,\n SlowOpThresholdMs = 100,\n Verbosity = 3,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleDatabaseCluster, err := digitalocean.NewDatabaseCluster(ctx, \"example\", \u0026digitalocean.DatabaseClusterArgs{\n\t\t\tName: pulumi.String(\"example-mongodb-cluster\"),\n\t\t\tEngine: pulumi.String(\"mongodb\"),\n\t\t\tVersion: pulumi.String(\"7\"),\n\t\t\tSize: pulumi.String(digitalocean.DatabaseSlug_DB_1VPCU1GB),\n\t\t\tRegion: pulumi.String(digitalocean.RegionNYC3),\n\t\t\tNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = digitalocean.NewDatabaseMongodbConfig(ctx, \"example\", \u0026digitalocean.DatabaseMongodbConfigArgs{\n\t\t\tClusterId: exampleDatabaseCluster.ID(),\n\t\t\tDefaultReadConcern: 
pulumi.String(\"majority\"),\n\t\t\tDefaultWriteConcern: pulumi.String(\"majority\"),\n\t\t\tTransactionLifetimeLimitSeconds: pulumi.Int(100),\n\t\t\tSlowOpThresholdMs: pulumi.Int(100),\n\t\t\tVerbosity: pulumi.Int(3),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.digitalocean.DatabaseCluster;\nimport com.pulumi.digitalocean.DatabaseClusterArgs;\nimport com.pulumi.digitalocean.DatabaseMongodbConfig;\nimport com.pulumi.digitalocean.DatabaseMongodbConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var exampleDatabaseCluster = new DatabaseCluster(\"exampleDatabaseCluster\", DatabaseClusterArgs.builder()\n .name(\"example-mongodb-cluster\")\n .engine(\"mongodb\")\n .version(\"7\")\n .size(\"db-s-1vcpu-1gb\")\n .region(\"nyc3\")\n .nodeCount(1)\n .build());\n\n var example = new DatabaseMongodbConfig(\"example\", DatabaseMongodbConfigArgs.builder()\n .clusterId(exampleDatabaseCluster.id())\n .defaultReadConcern(\"majority\")\n .defaultWriteConcern(\"majority\")\n .transactionLifetimeLimitSeconds(100)\n .slowOpThresholdMs(100)\n .verbosity(3)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: digitalocean:DatabaseMongodbConfig\n properties:\n clusterId: ${exampleDatabaseCluster.id}\n defaultReadConcern: majority\n defaultWriteConcern: majority\n transactionLifetimeLimitSeconds: 100\n slowOpThresholdMs: 100\n verbosity: 3\n exampleDatabaseCluster:\n type: digitalocean:DatabaseCluster\n name: example\n properties:\n name: example-mongodb-cluster\n engine: mongodb\n version: '7'\n size: db-s-1vcpu-1gb\n region: nyc3\n nodeCount: 1\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nA MongoDB database cluster's configuration can be imported using the `id` the parent cluster, e.g.\n\n```sh\n$ pulumi import digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig example 4b62829a-9c42-465b-aaa3-84051048e712\n```\n\n", + "properties": { + "clusterId": { + "type": "string", + "description": "The ID of the target MongoDB cluster.\n" + }, + "defaultReadConcern": { + "type": "string", + "description": "Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/).\n" + }, + "defaultWriteConcern": { + "type": "string", + "description": "Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/).\n" + }, + "slowOpThresholdMs": { + "type": "integer", + "description": "Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. 
Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. \u003cem\u003eChanging this parameter will lead to a restart of the MongoDB service.\u003c/em\u003e Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs).\n" + }, + "transactionLifetimeLimitSeconds": { + "type": "integer", + "description": "Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. \u003cem\u003eChanging this parameter will lead to a restart of the MongoDB service.\u003c/em\u003e Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds).\n" + }, + "verbosity": { + "type": "integer", + "description": "The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. \u003cem\u003eChanging this parameter will lead to a restart of the MongoDB service.\u003c/em\u003e Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity).\n" + } + }, + "required": [ + "clusterId", + "defaultReadConcern", + "defaultWriteConcern", + "slowOpThresholdMs", + "transactionLifetimeLimitSeconds", + "verbosity" + ], + "inputProperties": { + "clusterId": { + "type": "string", + "description": "The ID of the target MongoDB cluster.\n", + "willReplaceOnChanges": true + }, + "defaultReadConcern": { + "type": "string", + "description": "Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/).\n" + }, + "defaultWriteConcern": { + "type": "string", + "description": "Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/).\n" + }, + "slowOpThresholdMs": { + "type": "integer", + "description": "Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. \u003cem\u003eChanging this parameter will lead to a restart of the MongoDB service.\u003c/em\u003e Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs).\n" + }, + "transactionLifetimeLimitSeconds": { + "type": "integer", + "description": "Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. 
\u003cem\u003eChanging this parameter will lead to a restart of the MongoDB service.\u003c/em\u003e Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds).\n" + }, + "verbosity": { + "type": "integer", + "description": "The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. \u003cem\u003eChanging this parameter will lead to a restart of the MongoDB service.\u003c/em\u003e Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity).\n" + } + }, + "requiredInputs": [ + "clusterId" + ], + "stateInputs": { + "description": "Input properties used for looking up and filtering DatabaseMongodbConfig resources.\n", + "properties": { + "clusterId": { + "type": "string", + "description": "The ID of the target MongoDB cluster.\n", + "willReplaceOnChanges": true + }, + "defaultReadConcern": { + "type": "string", + "description": "Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/).\n" + }, + "defaultWriteConcern": { + "type": "string", + "description": "Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/).\n" + }, + "slowOpThresholdMs": { + "type": "integer", + "description": "Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. \u003cem\u003eChanging this parameter will lead to a restart of the MongoDB service.\u003c/em\u003e Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs).\n" + }, + "transactionLifetimeLimitSeconds": { + "type": "integer", + "description": "Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. \u003cem\u003eChanging this parameter will lead to a restart of the MongoDB service.\u003c/em\u003e Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds).\n" + }, + "verbosity": { + "type": "integer", + "description": "The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. 
\u003cem\u003eChanging this parameter will lead to a restart of the MongoDB service.\u003c/em\u003e Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity).\n" + } + }, + "type": "object" + } + }, "digitalocean:index/databaseMysqlConfig:DatabaseMysqlConfig": { "description": "Provides a virtual resource that can be used to change advanced configuration\noptions for a DigitalOcean managed MySQL database cluster.\n\n\u003e **Note** MySQL configurations are only removed from state when destroyed. The remote configuration is not unset.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as digitalocean from \"@pulumi/digitalocean\";\n\nconst exampleDatabaseCluster = new digitalocean.DatabaseCluster(\"example\", {\n name: \"example-mysql-cluster\",\n engine: \"mysql\",\n version: \"8\",\n size: digitalocean.DatabaseSlug.DB_1VPCU1GB,\n region: digitalocean.Region.NYC1,\n nodeCount: 1,\n});\nconst example = new digitalocean.DatabaseMysqlConfig(\"example\", {\n clusterId: exampleDatabaseCluster.id,\n connectTimeout: 10,\n defaultTimeZone: \"UTC\",\n});\n```\n```python\nimport pulumi\nimport pulumi_digitalocean as digitalocean\n\nexample_database_cluster = digitalocean.DatabaseCluster(\"example\",\n name=\"example-mysql-cluster\",\n engine=\"mysql\",\n version=\"8\",\n size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB,\n region=digitalocean.Region.NYC1,\n node_count=1)\nexample = digitalocean.DatabaseMysqlConfig(\"example\",\n cluster_id=example_database_cluster.id,\n connect_timeout=10,\n default_time_zone=\"UTC\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing DigitalOcean = Pulumi.DigitalOcean;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleDatabaseCluster = new DigitalOcean.DatabaseCluster(\"example\", new()\n {\n Name = \"example-mysql-cluster\",\n Engine = \"mysql\",\n Version = \"8\",\n Size = DigitalOcean.DatabaseSlug.DB_1VPCU1GB,\n Region = DigitalOcean.Region.NYC1,\n NodeCount = 1,\n });\n\n var example = new DigitalOcean.DatabaseMysqlConfig(\"example\", new()\n {\n ClusterId = exampleDatabaseCluster.Id,\n ConnectTimeout = 10,\n DefaultTimeZone = \"UTC\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleDatabaseCluster, err := digitalocean.NewDatabaseCluster(ctx, \"example\", \u0026digitalocean.DatabaseClusterArgs{\n\t\t\tName: pulumi.String(\"example-mysql-cluster\"),\n\t\t\tEngine: pulumi.String(\"mysql\"),\n\t\t\tVersion: pulumi.String(\"8\"),\n\t\t\tSize: pulumi.String(digitalocean.DatabaseSlug_DB_1VPCU1GB),\n\t\t\tRegion: pulumi.String(digitalocean.RegionNYC1),\n\t\t\tNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = digitalocean.NewDatabaseMysqlConfig(ctx, \"example\", \u0026digitalocean.DatabaseMysqlConfigArgs{\n\t\t\tClusterId: exampleDatabaseCluster.ID(),\n\t\t\tConnectTimeout: pulumi.Int(10),\n\t\t\tDefaultTimeZone: pulumi.String(\"UTC\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.digitalocean.DatabaseCluster;\nimport 
com.pulumi.digitalocean.DatabaseClusterArgs;\nimport com.pulumi.digitalocean.DatabaseMysqlConfig;\nimport com.pulumi.digitalocean.DatabaseMysqlConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var exampleDatabaseCluster = new DatabaseCluster(\"exampleDatabaseCluster\", DatabaseClusterArgs.builder()\n .name(\"example-mysql-cluster\")\n .engine(\"mysql\")\n .version(\"8\")\n .size(\"db-s-1vcpu-1gb\")\n .region(\"nyc1\")\n .nodeCount(1)\n .build());\n\n var example = new DatabaseMysqlConfig(\"example\", DatabaseMysqlConfigArgs.builder()\n .clusterId(exampleDatabaseCluster.id())\n .connectTimeout(10)\n .defaultTimeZone(\"UTC\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: digitalocean:DatabaseMysqlConfig\n properties:\n clusterId: ${exampleDatabaseCluster.id}\n connectTimeout: 10\n defaultTimeZone: UTC\n exampleDatabaseCluster:\n type: digitalocean:DatabaseCluster\n name: example\n properties:\n name: example-mysql-cluster\n engine: mysql\n version: '8'\n size: db-s-1vcpu-1gb\n region: nyc1\n nodeCount: 1\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nA MySQL database cluster's configuration can be imported using the `id` the parent cluster, e.g.\n\n```sh\n$ pulumi import digitalocean:index/databaseMysqlConfig:DatabaseMysqlConfig example 4b62829a-9c42-465b-aaa3-84051048e712\n```\n\n", "properties": { diff --git a/provider/go.mod b/provider/go.mod index 446f36b4..0c1e5789 100644 --- a/provider/go.mod +++ b/provider/go.mod @@ -77,7 +77,7 @@ require ( github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.5.0 // indirect - github.com/digitalocean/godo v1.119.1-0.20240726213151-e56b8a3e1755 // indirect + github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4 // indirect github.com/djherbis/times v1.5.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -232,12 +232,12 @@ require ( golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect golang.org/x/mod v0.18.0 // indirect golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.22.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect + golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.22.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/api v0.169.0 // indirect diff --git a/provider/go.sum b/provider/go.sum index f3ae98ba..a8929384 100644 --- a/provider/go.sum +++ b/provider/go.sum @@ -1335,8 +1335,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.5.0 h1:hn6cEZtQ0h3J8kFrHR/NrzyOoTnjgW1+FmNJzQ7y/sA= github.com/deckarep/golang-set/v2 v2.5.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/digitalocean/godo v1.119.1-0.20240726213151-e56b8a3e1755 h1:21uc6tNgFS/5MiYz+KzDhf5tVO38TN8FPO6803yNAjI= -github.com/digitalocean/godo 
v1.119.1-0.20240726213151-e56b8a3e1755/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= +github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4 h1:G/lf5YrNl4bDJyp3yJRld3J5BTFpQStYJHEnE6SxigY= +github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/djherbis/times v1.5.0 h1:79myA211VwPhFTqUk8xehWrsEO+zcIZj0zT8mXPVARU= github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -2326,8 +2326,8 @@ golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQ golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2461,8 +2461,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2514,8 +2514,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/sdk/dotnet/DatabaseKafkaConfig.cs b/sdk/dotnet/DatabaseKafkaConfig.cs new file mode 100644 index 00000000..43bc60a7 --- /dev/null +++ b/sdk/dotnet/DatabaseKafkaConfig.cs @@ -0,0 +1,457 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.DigitalOcean +{ + /// + /// Provides a virtual resource that can be used to change advanced configuration + /// options for a DigitalOcean managed Kafka database cluster. + /// + /// > **Note** Kafka configurations are only removed from state when destroyed. The remote configuration is not unset. + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using DigitalOcean = Pulumi.DigitalOcean; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var exampleDatabaseCluster = new DigitalOcean.DatabaseCluster("example", new() + /// { + /// Name = "example-kafka-cluster", + /// Engine = "kafka", + /// Version = "3.7", + /// Size = DigitalOcean.DatabaseSlug.DB_1VPCU1GB, + /// Region = DigitalOcean.Region.NYC3, + /// NodeCount = 3, + /// }); + /// + /// var example = new DigitalOcean.DatabaseKafkaConfig("example", new() + /// { + /// ClusterId = exampleDatabaseCluster.Id, + /// GroupInitialRebalanceDelayMs = 3000, + /// GroupMinSessionTimeoutMs = 6000, + /// GroupMaxSessionTimeoutMs = 1800000, + /// MessageMaxBytes = 1048588, + /// LogCleanerDeleteRetentionMs = 86400000, + /// LogCleanerMinCompactionLagMs = "0", + /// LogFlushIntervalMs = "9223372036854775807", + /// LogIndexIntervalBytes = 4096, + /// LogMessageDownconversionEnable = true, + /// LogMessageTimestampDifferenceMaxMs = "9223372036854775807", + /// LogPreallocate = false, + /// LogRetentionBytes = "-1", + /// LogRetentionHours = 168, + /// LogRetentionMs = "604800000", + /// LogRollJitterMs = "0", + /// LogSegmentDeleteDelayMs = 60000, + /// AutoCreateTopicsEnable = true, + /// }); + /// + /// }); + /// ``` + /// + /// ## Import + /// + /// A Kafka database cluster's configuration can be imported using the `id` the parent cluster, e.g. + /// + /// ```sh + /// $ pulumi import digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + /// ``` + /// + [DigitalOceanResourceType("digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig")] + public partial class DatabaseKafkaConfig : global::Pulumi.CustomResource + { + /// + /// Enable auto creation of topics. + /// + [Output("autoCreateTopicsEnable")] + public Output AutoCreateTopicsEnable { get; private set; } = null!; + + /// + /// The ID of the target Kafka cluster. + /// + [Output("clusterId")] + public Output ClusterId { get; private set; } = null!; + + /// + /// The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. 
+ /// + [Output("groupInitialRebalanceDelayMs")] + public Output GroupInitialRebalanceDelayMs { get; private set; } = null!; + + /// + /// The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + /// + [Output("groupMaxSessionTimeoutMs")] + public Output GroupMaxSessionTimeoutMs { get; private set; } = null!; + + /// + /// The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + /// + [Output("groupMinSessionTimeoutMs")] + public Output GroupMinSessionTimeoutMs { get; private set; } = null!; + + /// + /// How long are delete records retained? + /// + [Output("logCleanerDeleteRetentionMs")] + public Output LogCleanerDeleteRetentionMs { get; private set; } = null!; + + /// + /// The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + /// + [Output("logCleanerMinCompactionLagMs")] + public Output LogCleanerMinCompactionLagMs { get; private set; } = null!; + + /// + /// The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + /// + [Output("logFlushIntervalMs")] + public Output LogFlushIntervalMs { get; private set; } = null!; + + /// + /// The interval with which Kafka adds an entry to the offset index. + /// + [Output("logIndexIntervalBytes")] + public Output LogIndexIntervalBytes { get; private set; } = null!; + + /// + /// This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + /// + [Output("logMessageDownconversionEnable")] + public Output LogMessageDownconversionEnable { get; private set; } = null!; + + /// + /// The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + /// + [Output("logMessageTimestampDifferenceMaxMs")] + public Output LogMessageTimestampDifferenceMaxMs { get; private set; } = null!; + + /// + /// Controls whether to preallocate a file when creating a new segment. + /// + [Output("logPreallocate")] + public Output LogPreallocate { get; private set; } = null!; + + /// + /// The maximum size of the log before deleting messages. + /// + [Output("logRetentionBytes")] + public Output LogRetentionBytes { get; private set; } = null!; + + /// + /// The number of hours to keep a log file before deleting it. + /// + [Output("logRetentionHours")] + public Output LogRetentionHours { get; private set; } = null!; + + /// + /// The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + /// + [Output("logRetentionMs")] + public Output LogRetentionMs { get; private set; } = null!; + + /// + /// The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + /// + [Output("logRollJitterMs")] + public Output LogRollJitterMs { get; private set; } = null!; + + /// + /// The amount of time to wait before deleting a file from the filesystem. + /// + [Output("logSegmentDeleteDelayMs")] + public Output LogSegmentDeleteDelayMs { get; private set; } = null!; + + /// + /// The maximum size of message that the server can receive. 
+ /// + [Output("messageMaxBytes")] + public Output MessageMaxBytes { get; private set; } = null!; + + + /// + /// Create a DatabaseKafkaConfig resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public DatabaseKafkaConfig(string name, DatabaseKafkaConfigArgs args, CustomResourceOptions? options = null) + : base("digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig", name, args ?? new DatabaseKafkaConfigArgs(), MakeResourceOptions(options, "")) + { + } + + private DatabaseKafkaConfig(string name, Input id, DatabaseKafkaConfigState? state = null, CustomResourceOptions? options = null) + : base("digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig", name, state, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing DatabaseKafkaConfig resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// Any extra arguments used during the lookup. + /// A bag of options that control this resource's behavior + public static DatabaseKafkaConfig Get(string name, Input id, DatabaseKafkaConfigState? state = null, CustomResourceOptions? options = null) + { + return new DatabaseKafkaConfig(name, id, state, options); + } + } + + public sealed class DatabaseKafkaConfigArgs : global::Pulumi.ResourceArgs + { + /// + /// Enable auto creation of topics. + /// + [Input("autoCreateTopicsEnable")] + public Input? AutoCreateTopicsEnable { get; set; } + + /// + /// The ID of the target Kafka cluster. + /// + [Input("clusterId", required: true)] + public Input ClusterId { get; set; } = null!; + + /// + /// The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + /// + [Input("groupInitialRebalanceDelayMs")] + public Input? GroupInitialRebalanceDelayMs { get; set; } + + /// + /// The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + /// + [Input("groupMaxSessionTimeoutMs")] + public Input? GroupMaxSessionTimeoutMs { get; set; } + + /// + /// The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + /// + [Input("groupMinSessionTimeoutMs")] + public Input? GroupMinSessionTimeoutMs { get; set; } + + /// + /// How long are delete records retained? + /// + [Input("logCleanerDeleteRetentionMs")] + public Input? 
LogCleanerDeleteRetentionMs { get; set; } + + /// + /// The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + /// + [Input("logCleanerMinCompactionLagMs")] + public Input? LogCleanerMinCompactionLagMs { get; set; } + + /// + /// The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + /// + [Input("logFlushIntervalMs")] + public Input? LogFlushIntervalMs { get; set; } + + /// + /// The interval with which Kafka adds an entry to the offset index. + /// + [Input("logIndexIntervalBytes")] + public Input? LogIndexIntervalBytes { get; set; } + + /// + /// This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + /// + [Input("logMessageDownconversionEnable")] + public Input? LogMessageDownconversionEnable { get; set; } + + /// + /// The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + /// + [Input("logMessageTimestampDifferenceMaxMs")] + public Input? LogMessageTimestampDifferenceMaxMs { get; set; } + + /// + /// Controls whether to preallocate a file when creating a new segment. + /// + [Input("logPreallocate")] + public Input? LogPreallocate { get; set; } + + /// + /// The maximum size of the log before deleting messages. + /// + [Input("logRetentionBytes")] + public Input? LogRetentionBytes { get; set; } + + /// + /// The number of hours to keep a log file before deleting it. + /// + [Input("logRetentionHours")] + public Input? LogRetentionHours { get; set; } + + /// + /// The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + /// + [Input("logRetentionMs")] + public Input? LogRetentionMs { get; set; } + + /// + /// The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + /// + [Input("logRollJitterMs")] + public Input? LogRollJitterMs { get; set; } + + /// + /// The amount of time to wait before deleting a file from the filesystem. + /// + [Input("logSegmentDeleteDelayMs")] + public Input? LogSegmentDeleteDelayMs { get; set; } + + /// + /// The maximum size of message that the server can receive. + /// + [Input("messageMaxBytes")] + public Input? MessageMaxBytes { get; set; } + + public DatabaseKafkaConfigArgs() + { + } + public static new DatabaseKafkaConfigArgs Empty => new DatabaseKafkaConfigArgs(); + } + + public sealed class DatabaseKafkaConfigState : global::Pulumi.ResourceArgs + { + /// + /// Enable auto creation of topics. + /// + [Input("autoCreateTopicsEnable")] + public Input? AutoCreateTopicsEnable { get; set; } + + /// + /// The ID of the target Kafka cluster. + /// + [Input("clusterId")] + public Input? ClusterId { get; set; } + + /// + /// The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + /// + [Input("groupInitialRebalanceDelayMs")] + public Input? 
GroupInitialRebalanceDelayMs { get; set; } + + /// + /// The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + /// + [Input("groupMaxSessionTimeoutMs")] + public Input? GroupMaxSessionTimeoutMs { get; set; } + + /// + /// The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + /// + [Input("groupMinSessionTimeoutMs")] + public Input? GroupMinSessionTimeoutMs { get; set; } + + /// + /// How long are delete records retained? + /// + [Input("logCleanerDeleteRetentionMs")] + public Input? LogCleanerDeleteRetentionMs { get; set; } + + /// + /// The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + /// + [Input("logCleanerMinCompactionLagMs")] + public Input? LogCleanerMinCompactionLagMs { get; set; } + + /// + /// The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + /// + [Input("logFlushIntervalMs")] + public Input? LogFlushIntervalMs { get; set; } + + /// + /// The interval with which Kafka adds an entry to the offset index. + /// + [Input("logIndexIntervalBytes")] + public Input? LogIndexIntervalBytes { get; set; } + + /// + /// This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + /// + [Input("logMessageDownconversionEnable")] + public Input? LogMessageDownconversionEnable { get; set; } + + /// + /// The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + /// + [Input("logMessageTimestampDifferenceMaxMs")] + public Input? LogMessageTimestampDifferenceMaxMs { get; set; } + + /// + /// Controls whether to preallocate a file when creating a new segment. + /// + [Input("logPreallocate")] + public Input? LogPreallocate { get; set; } + + /// + /// The maximum size of the log before deleting messages. + /// + [Input("logRetentionBytes")] + public Input? LogRetentionBytes { get; set; } + + /// + /// The number of hours to keep a log file before deleting it. + /// + [Input("logRetentionHours")] + public Input? LogRetentionHours { get; set; } + + /// + /// The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + /// + [Input("logRetentionMs")] + public Input? LogRetentionMs { get; set; } + + /// + /// The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + /// + [Input("logRollJitterMs")] + public Input? LogRollJitterMs { get; set; } + + /// + /// The amount of time to wait before deleting a file from the filesystem. + /// + [Input("logSegmentDeleteDelayMs")] + public Input? LogSegmentDeleteDelayMs { get; set; } + + /// + /// The maximum size of message that the server can receive. + /// + [Input("messageMaxBytes")] + public Input? 
MessageMaxBytes { get; set; } + + public DatabaseKafkaConfigState() + { + } + public static new DatabaseKafkaConfigState Empty => new DatabaseKafkaConfigState(); + } +} diff --git a/sdk/dotnet/DatabaseMongodbConfig.cs b/sdk/dotnet/DatabaseMongodbConfig.cs new file mode 100644 index 00000000..c80de1b5 --- /dev/null +++ b/sdk/dotnet/DatabaseMongodbConfig.cs @@ -0,0 +1,229 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.DigitalOcean +{ + /// + /// Provides a virtual resource that can be used to change advanced configuration + /// options for a DigitalOcean managed MongoDB database cluster. + /// + /// > **Note** MongoDB configurations are only removed from state when destroyed. The remote configuration is not unset. + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using DigitalOcean = Pulumi.DigitalOcean; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var exampleDatabaseCluster = new DigitalOcean.DatabaseCluster("example", new() + /// { + /// Name = "example-mongodb-cluster", + /// Engine = "mongodb", + /// Version = "7", + /// Size = DigitalOcean.DatabaseSlug.DB_1VPCU1GB, + /// Region = DigitalOcean.Region.NYC3, + /// NodeCount = 1, + /// }); + /// + /// var example = new DigitalOcean.DatabaseMongodbConfig("example", new() + /// { + /// ClusterId = exampleDatabaseCluster.Id, + /// DefaultReadConcern = "majority", + /// DefaultWriteConcern = "majority", + /// TransactionLifetimeLimitSeconds = 100, + /// SlowOpThresholdMs = 100, + /// Verbosity = 3, + /// }); + /// + /// }); + /// ``` + /// + /// ## Import + /// + /// A MongoDB database cluster's configuration can be imported using the `id` the parent cluster, e.g. + /// + /// ```sh + /// $ pulumi import digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + /// ``` + /// + [DigitalOceanResourceType("digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig")] + public partial class DatabaseMongodbConfig : global::Pulumi.CustomResource + { + /// + /// The ID of the target MongoDB cluster. + /// + [Output("clusterId")] + public Output ClusterId { get; private set; } = null!; + + /// + /// Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + /// + [Output("defaultReadConcern")] + public Output DefaultReadConcern { get; private set; } = null!; + + /// + /// Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). 
+ /// + [Output("defaultWriteConcern")] + public Output DefaultWriteConcern { get; private set; } = null!; + + /// + /// Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + /// + [Output("slowOpThresholdMs")] + public Output SlowOpThresholdMs { get; private set; } = null!; + + /// + /// Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + /// + [Output("transactionLifetimeLimitSeconds")] + public Output TransactionLifetimeLimitSeconds { get; private set; } = null!; + + /// + /// The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + /// + [Output("verbosity")] + public Output Verbosity { get; private set; } = null!; + + + /// + /// Create a DatabaseMongodbConfig resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public DatabaseMongodbConfig(string name, DatabaseMongodbConfigArgs args, CustomResourceOptions? options = null) + : base("digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig", name, args ?? new DatabaseMongodbConfigArgs(), MakeResourceOptions(options, "")) + { + } + + private DatabaseMongodbConfig(string name, Input id, DatabaseMongodbConfigState? state = null, CustomResourceOptions? options = null) + : base("digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig", name, state, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing DatabaseMongodbConfig resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// Any extra arguments used during the lookup. + /// A bag of options that control this resource's behavior + public static DatabaseMongodbConfig Get(string name, Input id, DatabaseMongodbConfigState? 
state = null, CustomResourceOptions? options = null) + { + return new DatabaseMongodbConfig(name, id, state, options); + } + } + + public sealed class DatabaseMongodbConfigArgs : global::Pulumi.ResourceArgs + { + /// + /// The ID of the target MongoDB cluster. + /// + [Input("clusterId", required: true)] + public Input ClusterId { get; set; } = null!; + + /// + /// Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + /// + [Input("defaultReadConcern")] + public Input? DefaultReadConcern { get; set; } + + /// + /// Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + /// + [Input("defaultWriteConcern")] + public Input? DefaultWriteConcern { get; set; } + + /// + /// Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + /// + [Input("slowOpThresholdMs")] + public Input? SlowOpThresholdMs { get; set; } + + /// + /// Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + /// + [Input("transactionLifetimeLimitSeconds")] + public Input? TransactionLifetimeLimitSeconds { get; set; } + + /// + /// The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + /// + [Input("verbosity")] + public Input? Verbosity { get; set; } + + public DatabaseMongodbConfigArgs() + { + } + public static new DatabaseMongodbConfigArgs Empty => new DatabaseMongodbConfigArgs(); + } + + public sealed class DatabaseMongodbConfigState : global::Pulumi.ResourceArgs + { + /// + /// The ID of the target MongoDB cluster. + /// + [Input("clusterId")] + public Input? ClusterId { get; set; } + + /// + /// Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. 
Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + /// + [Input("defaultReadConcern")] + public Input? DefaultReadConcern { get; set; } + + /// + /// Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + /// + [Input("defaultWriteConcern")] + public Input? DefaultWriteConcern { get; set; } + + /// + /// Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + /// + [Input("slowOpThresholdMs")] + public Input? SlowOpThresholdMs { get; set; } + + /// + /// Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + /// + [Input("transactionLifetimeLimitSeconds")] + public Input? TransactionLifetimeLimitSeconds { get; set; } + + /// + /// The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + /// + [Input("verbosity")] + public Input? Verbosity { get; set; } + + public DatabaseMongodbConfigState() + { + } + public static new DatabaseMongodbConfigState Empty => new DatabaseMongodbConfigState(); + } +} diff --git a/sdk/dotnet/Inputs/DatabaseUserSettingArgs.cs b/sdk/dotnet/Inputs/DatabaseUserSettingArgs.cs index 68d3a7aa..83961619 100644 --- a/sdk/dotnet/Inputs/DatabaseUserSettingArgs.cs +++ b/sdk/dotnet/Inputs/DatabaseUserSettingArgs.cs @@ -26,6 +26,14 @@ public InputList Acls set => _acls = value; } + [Input("opensearchAcls")] + private InputList? _opensearchAcls; + public InputList OpensearchAcls + { + get => _opensearchAcls ?? (_opensearchAcls = new InputList()); + set => _opensearchAcls = value; + } + public DatabaseUserSettingArgs() { } diff --git a/sdk/dotnet/Inputs/DatabaseUserSettingGetArgs.cs b/sdk/dotnet/Inputs/DatabaseUserSettingGetArgs.cs index b49692fc..aeda79a5 100644 --- a/sdk/dotnet/Inputs/DatabaseUserSettingGetArgs.cs +++ b/sdk/dotnet/Inputs/DatabaseUserSettingGetArgs.cs @@ -26,6 +26,14 @@ public InputList Acls set => _acls = value; } + [Input("opensearchAcls")] + private InputList? _opensearchAcls; + public InputList OpensearchAcls + { + get => _opensearchAcls ?? 
(_opensearchAcls = new InputList()); + set => _opensearchAcls = value; + } + public DatabaseUserSettingGetArgs() { } diff --git a/sdk/dotnet/Inputs/DatabaseUserSettingOpensearchAclArgs.cs b/sdk/dotnet/Inputs/DatabaseUserSettingOpensearchAclArgs.cs new file mode 100644 index 00000000..31b91c4e --- /dev/null +++ b/sdk/dotnet/Inputs/DatabaseUserSettingOpensearchAclArgs.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.DigitalOcean.Inputs +{ + + public sealed class DatabaseUserSettingOpensearchAclArgs : global::Pulumi.ResourceArgs + { + [Input("index", required: true)] + public Input Index { get; set; } = null!; + + /// + /// The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + /// + [Input("permission", required: true)] + public Input Permission { get; set; } = null!; + + public DatabaseUserSettingOpensearchAclArgs() + { + } + public static new DatabaseUserSettingOpensearchAclArgs Empty => new DatabaseUserSettingOpensearchAclArgs(); + } +} diff --git a/sdk/dotnet/Inputs/DatabaseUserSettingOpensearchAclGetArgs.cs b/sdk/dotnet/Inputs/DatabaseUserSettingOpensearchAclGetArgs.cs new file mode 100644 index 00000000..2cd8620c --- /dev/null +++ b/sdk/dotnet/Inputs/DatabaseUserSettingOpensearchAclGetArgs.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.DigitalOcean.Inputs +{ + + public sealed class DatabaseUserSettingOpensearchAclGetArgs : global::Pulumi.ResourceArgs + { + [Input("index", required: true)] + public Input Index { get; set; } = null!; + + /// + /// The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. 
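+        /// For an OpenSearch user, the permission is applied to the index (or glob pattern of indexes) named by `index`.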
+ /// + [Input("permission", required: true)] + public Input Permission { get; set; } = null!; + + public DatabaseUserSettingOpensearchAclGetArgs() + { + } + public static new DatabaseUserSettingOpensearchAclGetArgs Empty => new DatabaseUserSettingOpensearchAclGetArgs(); + } +} diff --git a/sdk/dotnet/Outputs/DatabaseUserSetting.cs b/sdk/dotnet/Outputs/DatabaseUserSetting.cs index ef2bda74..bbe9bc3a 100644 --- a/sdk/dotnet/Outputs/DatabaseUserSetting.cs +++ b/sdk/dotnet/Outputs/DatabaseUserSetting.cs @@ -19,11 +19,16 @@ public sealed class DatabaseUserSetting /// An individual ACL includes the following: /// public readonly ImmutableArray Acls; + public readonly ImmutableArray OpensearchAcls; [OutputConstructor] - private DatabaseUserSetting(ImmutableArray acls) + private DatabaseUserSetting( + ImmutableArray acls, + + ImmutableArray opensearchAcls) { Acls = acls; + OpensearchAcls = opensearchAcls; } } } diff --git a/sdk/dotnet/Outputs/DatabaseUserSettingOpensearchAcl.cs b/sdk/dotnet/Outputs/DatabaseUserSettingOpensearchAcl.cs new file mode 100644 index 00000000..f6e547be --- /dev/null +++ b/sdk/dotnet/Outputs/DatabaseUserSettingOpensearchAcl.cs @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.DigitalOcean.Outputs +{ + + [OutputType] + public sealed class DatabaseUserSettingOpensearchAcl + { + public readonly string Index; + /// + /// The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + /// + public readonly string Permission; + + [OutputConstructor] + private DatabaseUserSettingOpensearchAcl( + string index, + + string permission) + { + Index = index; + Permission = permission; + } + } +} diff --git a/sdk/go/digitalocean/databaseKafkaConfig.go b/sdk/go/digitalocean/databaseKafkaConfig.go new file mode 100644 index 00000000..8591091e --- /dev/null +++ b/sdk/go/digitalocean/databaseKafkaConfig.go @@ -0,0 +1,539 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package digitalocean + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Provides a virtual resource that can be used to change advanced configuration +// options for a DigitalOcean managed Kafka database cluster. +// +// > **Note** Kafka configurations are only removed from state when destroyed. The remote configuration is not unset. 
+// +// ## Example Usage +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// exampleDatabaseCluster, err := digitalocean.NewDatabaseCluster(ctx, "example", &digitalocean.DatabaseClusterArgs{ +// Name: pulumi.String("example-kafka-cluster"), +// Engine: pulumi.String("kafka"), +// Version: pulumi.String("3.7"), +// Size: pulumi.String(digitalocean.DatabaseSlug_DB_1VPCU1GB), +// Region: pulumi.String(digitalocean.RegionNYC3), +// NodeCount: pulumi.Int(3), +// }) +// if err != nil { +// return err +// } +// _, err = digitalocean.NewDatabaseKafkaConfig(ctx, "example", &digitalocean.DatabaseKafkaConfigArgs{ +// ClusterId: exampleDatabaseCluster.ID(), +// GroupInitialRebalanceDelayMs: pulumi.Int(3000), +// GroupMinSessionTimeoutMs: pulumi.Int(6000), +// GroupMaxSessionTimeoutMs: pulumi.Int(1800000), +// MessageMaxBytes: pulumi.Int(1048588), +// LogCleanerDeleteRetentionMs: pulumi.Int(86400000), +// LogCleanerMinCompactionLagMs: pulumi.String("0"), +// LogFlushIntervalMs: pulumi.String("9223372036854775807"), +// LogIndexIntervalBytes: pulumi.Int(4096), +// LogMessageDownconversionEnable: pulumi.Bool(true), +// LogMessageTimestampDifferenceMaxMs: pulumi.String("9223372036854775807"), +// LogPreallocate: pulumi.Bool(false), +// LogRetentionBytes: pulumi.String("-1"), +// LogRetentionHours: pulumi.Int(168), +// LogRetentionMs: pulumi.String("604800000"), +// LogRollJitterMs: pulumi.String("0"), +// LogSegmentDeleteDelayMs: pulumi.Int(60000), +// AutoCreateTopicsEnable: pulumi.Bool(true), +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ## Import +// +// A Kafka database cluster's configuration can be imported using the `id` the parent cluster, e.g. +// +// ```sh +// $ pulumi import digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig example 4b62829a-9c42-465b-aaa3-84051048e712 +// ``` +type DatabaseKafkaConfig struct { + pulumi.CustomResourceState + + // Enable auto creation of topics. + AutoCreateTopicsEnable pulumi.BoolOutput `pulumi:"autoCreateTopicsEnable"` + // The ID of the target Kafka cluster. + ClusterId pulumi.StringOutput `pulumi:"clusterId"` + // The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + GroupInitialRebalanceDelayMs pulumi.IntOutput `pulumi:"groupInitialRebalanceDelayMs"` + // The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMaxSessionTimeoutMs pulumi.IntOutput `pulumi:"groupMaxSessionTimeoutMs"` + // The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMinSessionTimeoutMs pulumi.IntOutput `pulumi:"groupMinSessionTimeoutMs"` + // How long are delete records retained? 
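+	// (Specifically, Kafka's `log.cleaner.delete.retention.ms`: the amount of time, in milliseconds, that delete tombstone markers are retained on log-compacted topics.)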
+ LogCleanerDeleteRetentionMs pulumi.IntOutput `pulumi:"logCleanerDeleteRetentionMs"` + // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + LogCleanerMinCompactionLagMs pulumi.StringOutput `pulumi:"logCleanerMinCompactionLagMs"` + // The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + LogFlushIntervalMs pulumi.StringOutput `pulumi:"logFlushIntervalMs"` + // The interval with which Kafka adds an entry to the offset index. + LogIndexIntervalBytes pulumi.IntOutput `pulumi:"logIndexIntervalBytes"` + // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + LogMessageDownconversionEnable pulumi.BoolOutput `pulumi:"logMessageDownconversionEnable"` + // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + LogMessageTimestampDifferenceMaxMs pulumi.StringOutput `pulumi:"logMessageTimestampDifferenceMaxMs"` + // Controls whether to preallocate a file when creating a new segment. + LogPreallocate pulumi.BoolOutput `pulumi:"logPreallocate"` + // The maximum size of the log before deleting messages. + LogRetentionBytes pulumi.StringOutput `pulumi:"logRetentionBytes"` + // The number of hours to keep a log file before deleting it. + LogRetentionHours pulumi.IntOutput `pulumi:"logRetentionHours"` + // The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + LogRetentionMs pulumi.StringOutput `pulumi:"logRetentionMs"` + // The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + LogRollJitterMs pulumi.StringOutput `pulumi:"logRollJitterMs"` + // The amount of time to wait before deleting a file from the filesystem. + LogSegmentDeleteDelayMs pulumi.IntOutput `pulumi:"logSegmentDeleteDelayMs"` + // The maximum size of message that the server can receive. + MessageMaxBytes pulumi.IntOutput `pulumi:"messageMaxBytes"` +} + +// NewDatabaseKafkaConfig registers a new resource with the given unique name, arguments, and options. +func NewDatabaseKafkaConfig(ctx *pulumi.Context, + name string, args *DatabaseKafkaConfigArgs, opts ...pulumi.ResourceOption) (*DatabaseKafkaConfig, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.ClusterId == nil { + return nil, errors.New("invalid value for required argument 'ClusterId'") + } + opts = internal.PkgResourceDefaultOpts(opts) + var resource DatabaseKafkaConfig + err := ctx.RegisterResource("digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetDatabaseKafkaConfig gets an existing DatabaseKafkaConfig resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetDatabaseKafkaConfig(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *DatabaseKafkaConfigState, opts ...pulumi.ResourceOption) (*DatabaseKafkaConfig, error) { + var resource DatabaseKafkaConfig + err := ctx.ReadResource("digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig", name, id, state, &resource, opts...) 
+ if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering DatabaseKafkaConfig resources. +type databaseKafkaConfigState struct { + // Enable auto creation of topics. + AutoCreateTopicsEnable *bool `pulumi:"autoCreateTopicsEnable"` + // The ID of the target Kafka cluster. + ClusterId *string `pulumi:"clusterId"` + // The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + GroupInitialRebalanceDelayMs *int `pulumi:"groupInitialRebalanceDelayMs"` + // The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMaxSessionTimeoutMs *int `pulumi:"groupMaxSessionTimeoutMs"` + // The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMinSessionTimeoutMs *int `pulumi:"groupMinSessionTimeoutMs"` + // How long are delete records retained? + LogCleanerDeleteRetentionMs *int `pulumi:"logCleanerDeleteRetentionMs"` + // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + LogCleanerMinCompactionLagMs *string `pulumi:"logCleanerMinCompactionLagMs"` + // The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + LogFlushIntervalMs *string `pulumi:"logFlushIntervalMs"` + // The interval with which Kafka adds an entry to the offset index. + LogIndexIntervalBytes *int `pulumi:"logIndexIntervalBytes"` + // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + LogMessageDownconversionEnable *bool `pulumi:"logMessageDownconversionEnable"` + // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + LogMessageTimestampDifferenceMaxMs *string `pulumi:"logMessageTimestampDifferenceMaxMs"` + // Controls whether to preallocate a file when creating a new segment. + LogPreallocate *bool `pulumi:"logPreallocate"` + // The maximum size of the log before deleting messages. + LogRetentionBytes *string `pulumi:"logRetentionBytes"` + // The number of hours to keep a log file before deleting it. + LogRetentionHours *int `pulumi:"logRetentionHours"` + // The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + LogRetentionMs *string `pulumi:"logRetentionMs"` + // The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + LogRollJitterMs *string `pulumi:"logRollJitterMs"` + // The amount of time to wait before deleting a file from the filesystem. + LogSegmentDeleteDelayMs *int `pulumi:"logSegmentDeleteDelayMs"` + // The maximum size of message that the server can receive. 
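+	// (This maps to Kafka's `message.max.bytes`: the largest record batch size, in bytes, that the broker will accept.)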
+ MessageMaxBytes *int `pulumi:"messageMaxBytes"` +} + +type DatabaseKafkaConfigState struct { + // Enable auto creation of topics. + AutoCreateTopicsEnable pulumi.BoolPtrInput + // The ID of the target Kafka cluster. + ClusterId pulumi.StringPtrInput + // The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + GroupInitialRebalanceDelayMs pulumi.IntPtrInput + // The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMaxSessionTimeoutMs pulumi.IntPtrInput + // The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMinSessionTimeoutMs pulumi.IntPtrInput + // How long are delete records retained? + LogCleanerDeleteRetentionMs pulumi.IntPtrInput + // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + LogCleanerMinCompactionLagMs pulumi.StringPtrInput + // The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + LogFlushIntervalMs pulumi.StringPtrInput + // The interval with which Kafka adds an entry to the offset index. + LogIndexIntervalBytes pulumi.IntPtrInput + // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + LogMessageDownconversionEnable pulumi.BoolPtrInput + // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + LogMessageTimestampDifferenceMaxMs pulumi.StringPtrInput + // Controls whether to preallocate a file when creating a new segment. + LogPreallocate pulumi.BoolPtrInput + // The maximum size of the log before deleting messages. + LogRetentionBytes pulumi.StringPtrInput + // The number of hours to keep a log file before deleting it. + LogRetentionHours pulumi.IntPtrInput + // The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + LogRetentionMs pulumi.StringPtrInput + // The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + LogRollJitterMs pulumi.StringPtrInput + // The amount of time to wait before deleting a file from the filesystem. + LogSegmentDeleteDelayMs pulumi.IntPtrInput + // The maximum size of message that the server can receive. + MessageMaxBytes pulumi.IntPtrInput +} + +func (DatabaseKafkaConfigState) ElementType() reflect.Type { + return reflect.TypeOf((*databaseKafkaConfigState)(nil)).Elem() +} + +type databaseKafkaConfigArgs struct { + // Enable auto creation of topics. + AutoCreateTopicsEnable *bool `pulumi:"autoCreateTopicsEnable"` + // The ID of the target Kafka cluster. 
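+	// This is the UUID of the parent database cluster, the same ID used when importing the configuration.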
+ ClusterId string `pulumi:"clusterId"` + // The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + GroupInitialRebalanceDelayMs *int `pulumi:"groupInitialRebalanceDelayMs"` + // The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMaxSessionTimeoutMs *int `pulumi:"groupMaxSessionTimeoutMs"` + // The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMinSessionTimeoutMs *int `pulumi:"groupMinSessionTimeoutMs"` + // How long are delete records retained? + LogCleanerDeleteRetentionMs *int `pulumi:"logCleanerDeleteRetentionMs"` + // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + LogCleanerMinCompactionLagMs *string `pulumi:"logCleanerMinCompactionLagMs"` + // The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + LogFlushIntervalMs *string `pulumi:"logFlushIntervalMs"` + // The interval with which Kafka adds an entry to the offset index. + LogIndexIntervalBytes *int `pulumi:"logIndexIntervalBytes"` + // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + LogMessageDownconversionEnable *bool `pulumi:"logMessageDownconversionEnable"` + // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + LogMessageTimestampDifferenceMaxMs *string `pulumi:"logMessageTimestampDifferenceMaxMs"` + // Controls whether to preallocate a file when creating a new segment. + LogPreallocate *bool `pulumi:"logPreallocate"` + // The maximum size of the log before deleting messages. + LogRetentionBytes *string `pulumi:"logRetentionBytes"` + // The number of hours to keep a log file before deleting it. + LogRetentionHours *int `pulumi:"logRetentionHours"` + // The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + LogRetentionMs *string `pulumi:"logRetentionMs"` + // The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + LogRollJitterMs *string `pulumi:"logRollJitterMs"` + // The amount of time to wait before deleting a file from the filesystem. + LogSegmentDeleteDelayMs *int `pulumi:"logSegmentDeleteDelayMs"` + // The maximum size of message that the server can receive. + MessageMaxBytes *int `pulumi:"messageMaxBytes"` +} + +// The set of arguments for constructing a DatabaseKafkaConfig resource. +type DatabaseKafkaConfigArgs struct { + // Enable auto creation of topics. + AutoCreateTopicsEnable pulumi.BoolPtrInput + // The ID of the target Kafka cluster. 
+ ClusterId pulumi.StringInput + // The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + GroupInitialRebalanceDelayMs pulumi.IntPtrInput + // The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMaxSessionTimeoutMs pulumi.IntPtrInput + // The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMinSessionTimeoutMs pulumi.IntPtrInput + // How long are delete records retained? + LogCleanerDeleteRetentionMs pulumi.IntPtrInput + // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + LogCleanerMinCompactionLagMs pulumi.StringPtrInput + // The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + LogFlushIntervalMs pulumi.StringPtrInput + // The interval with which Kafka adds an entry to the offset index. + LogIndexIntervalBytes pulumi.IntPtrInput + // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + LogMessageDownconversionEnable pulumi.BoolPtrInput + // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + LogMessageTimestampDifferenceMaxMs pulumi.StringPtrInput + // Controls whether to preallocate a file when creating a new segment. + LogPreallocate pulumi.BoolPtrInput + // The maximum size of the log before deleting messages. + LogRetentionBytes pulumi.StringPtrInput + // The number of hours to keep a log file before deleting it. + LogRetentionHours pulumi.IntPtrInput + // The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + LogRetentionMs pulumi.StringPtrInput + // The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + LogRollJitterMs pulumi.StringPtrInput + // The amount of time to wait before deleting a file from the filesystem. + LogSegmentDeleteDelayMs pulumi.IntPtrInput + // The maximum size of message that the server can receive. 
+ MessageMaxBytes pulumi.IntPtrInput +} + +func (DatabaseKafkaConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*databaseKafkaConfigArgs)(nil)).Elem() +} + +type DatabaseKafkaConfigInput interface { + pulumi.Input + + ToDatabaseKafkaConfigOutput() DatabaseKafkaConfigOutput + ToDatabaseKafkaConfigOutputWithContext(ctx context.Context) DatabaseKafkaConfigOutput +} + +func (*DatabaseKafkaConfig) ElementType() reflect.Type { + return reflect.TypeOf((**DatabaseKafkaConfig)(nil)).Elem() +} + +func (i *DatabaseKafkaConfig) ToDatabaseKafkaConfigOutput() DatabaseKafkaConfigOutput { + return i.ToDatabaseKafkaConfigOutputWithContext(context.Background()) +} + +func (i *DatabaseKafkaConfig) ToDatabaseKafkaConfigOutputWithContext(ctx context.Context) DatabaseKafkaConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(DatabaseKafkaConfigOutput) +} + +// DatabaseKafkaConfigArrayInput is an input type that accepts DatabaseKafkaConfigArray and DatabaseKafkaConfigArrayOutput values. +// You can construct a concrete instance of `DatabaseKafkaConfigArrayInput` via: +// +// DatabaseKafkaConfigArray{ DatabaseKafkaConfigArgs{...} } +type DatabaseKafkaConfigArrayInput interface { + pulumi.Input + + ToDatabaseKafkaConfigArrayOutput() DatabaseKafkaConfigArrayOutput + ToDatabaseKafkaConfigArrayOutputWithContext(context.Context) DatabaseKafkaConfigArrayOutput +} + +type DatabaseKafkaConfigArray []DatabaseKafkaConfigInput + +func (DatabaseKafkaConfigArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*DatabaseKafkaConfig)(nil)).Elem() +} + +func (i DatabaseKafkaConfigArray) ToDatabaseKafkaConfigArrayOutput() DatabaseKafkaConfigArrayOutput { + return i.ToDatabaseKafkaConfigArrayOutputWithContext(context.Background()) +} + +func (i DatabaseKafkaConfigArray) ToDatabaseKafkaConfigArrayOutputWithContext(ctx context.Context) DatabaseKafkaConfigArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(DatabaseKafkaConfigArrayOutput) +} + +// DatabaseKafkaConfigMapInput is an input type that accepts DatabaseKafkaConfigMap and DatabaseKafkaConfigMapOutput values. +// You can construct a concrete instance of `DatabaseKafkaConfigMapInput` via: +// +// DatabaseKafkaConfigMap{ "key": DatabaseKafkaConfigArgs{...} } +type DatabaseKafkaConfigMapInput interface { + pulumi.Input + + ToDatabaseKafkaConfigMapOutput() DatabaseKafkaConfigMapOutput + ToDatabaseKafkaConfigMapOutputWithContext(context.Context) DatabaseKafkaConfigMapOutput +} + +type DatabaseKafkaConfigMap map[string]DatabaseKafkaConfigInput + +func (DatabaseKafkaConfigMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*DatabaseKafkaConfig)(nil)).Elem() +} + +func (i DatabaseKafkaConfigMap) ToDatabaseKafkaConfigMapOutput() DatabaseKafkaConfigMapOutput { + return i.ToDatabaseKafkaConfigMapOutputWithContext(context.Background()) +} + +func (i DatabaseKafkaConfigMap) ToDatabaseKafkaConfigMapOutputWithContext(ctx context.Context) DatabaseKafkaConfigMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(DatabaseKafkaConfigMapOutput) +} + +type DatabaseKafkaConfigOutput struct{ *pulumi.OutputState } + +func (DatabaseKafkaConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DatabaseKafkaConfig)(nil)).Elem() +} + +func (o DatabaseKafkaConfigOutput) ToDatabaseKafkaConfigOutput() DatabaseKafkaConfigOutput { + return o +} + +func (o DatabaseKafkaConfigOutput) ToDatabaseKafkaConfigOutputWithContext(ctx context.Context) DatabaseKafkaConfigOutput { + return o +} + +// Enable auto creation of topics. 
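+//
+// As a usage sketch (assuming `cfg` is the *DatabaseKafkaConfig returned by
+// NewDatabaseKafkaConfig earlier in the program), the resolved value of this and the other
+// output properties can be exported or transformed like any other Pulumi output:
+//
+// ```go
+// ctx.Export("autoCreateTopics", cfg.AutoCreateTopicsEnable)
+// ctx.Export("retentionHours", cfg.LogRetentionHours)
+// ```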
+func (o DatabaseKafkaConfigOutput) AutoCreateTopicsEnable() pulumi.BoolOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.BoolOutput { return v.AutoCreateTopicsEnable }).(pulumi.BoolOutput) +} + +// The ID of the target Kafka cluster. +func (o DatabaseKafkaConfigOutput) ClusterId() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.StringOutput { return v.ClusterId }).(pulumi.StringOutput) +} + +// The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. +func (o DatabaseKafkaConfigOutput) GroupInitialRebalanceDelayMs() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.IntOutput { return v.GroupInitialRebalanceDelayMs }).(pulumi.IntOutput) +} + +// The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. +func (o DatabaseKafkaConfigOutput) GroupMaxSessionTimeoutMs() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.IntOutput { return v.GroupMaxSessionTimeoutMs }).(pulumi.IntOutput) +} + +// The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. +func (o DatabaseKafkaConfigOutput) GroupMinSessionTimeoutMs() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.IntOutput { return v.GroupMinSessionTimeoutMs }).(pulumi.IntOutput) +} + +// How long are delete records retained? +func (o DatabaseKafkaConfigOutput) LogCleanerDeleteRetentionMs() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.IntOutput { return v.LogCleanerDeleteRetentionMs }).(pulumi.IntOutput) +} + +// The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. +func (o DatabaseKafkaConfigOutput) LogCleanerMinCompactionLagMs() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.StringOutput { return v.LogCleanerMinCompactionLagMs }).(pulumi.StringOutput) +} + +// The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. +func (o DatabaseKafkaConfigOutput) LogFlushIntervalMs() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.StringOutput { return v.LogFlushIntervalMs }).(pulumi.StringOutput) +} + +// The interval with which Kafka adds an entry to the offset index. +func (o DatabaseKafkaConfigOutput) LogIndexIntervalBytes() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.IntOutput { return v.LogIndexIntervalBytes }).(pulumi.IntOutput) +} + +// This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. 
+func (o DatabaseKafkaConfigOutput) LogMessageDownconversionEnable() pulumi.BoolOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.BoolOutput { return v.LogMessageDownconversionEnable }).(pulumi.BoolOutput) +} + +// The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. +func (o DatabaseKafkaConfigOutput) LogMessageTimestampDifferenceMaxMs() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.StringOutput { return v.LogMessageTimestampDifferenceMaxMs }).(pulumi.StringOutput) +} + +// Controls whether to preallocate a file when creating a new segment. +func (o DatabaseKafkaConfigOutput) LogPreallocate() pulumi.BoolOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.BoolOutput { return v.LogPreallocate }).(pulumi.BoolOutput) +} + +// The maximum size of the log before deleting messages. +func (o DatabaseKafkaConfigOutput) LogRetentionBytes() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.StringOutput { return v.LogRetentionBytes }).(pulumi.StringOutput) +} + +// The number of hours to keep a log file before deleting it. +func (o DatabaseKafkaConfigOutput) LogRetentionHours() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.IntOutput { return v.LogRetentionHours }).(pulumi.IntOutput) +} + +// The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. +func (o DatabaseKafkaConfigOutput) LogRetentionMs() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.StringOutput { return v.LogRetentionMs }).(pulumi.StringOutput) +} + +// The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. +func (o DatabaseKafkaConfigOutput) LogRollJitterMs() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.StringOutput { return v.LogRollJitterMs }).(pulumi.StringOutput) +} + +// The amount of time to wait before deleting a file from the filesystem. +func (o DatabaseKafkaConfigOutput) LogSegmentDeleteDelayMs() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.IntOutput { return v.LogSegmentDeleteDelayMs }).(pulumi.IntOutput) +} + +// The maximum size of message that the server can receive. 
+func (o DatabaseKafkaConfigOutput) MessageMaxBytes() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseKafkaConfig) pulumi.IntOutput { return v.MessageMaxBytes }).(pulumi.IntOutput) +} + +type DatabaseKafkaConfigArrayOutput struct{ *pulumi.OutputState } + +func (DatabaseKafkaConfigArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*DatabaseKafkaConfig)(nil)).Elem() +} + +func (o DatabaseKafkaConfigArrayOutput) ToDatabaseKafkaConfigArrayOutput() DatabaseKafkaConfigArrayOutput { + return o +} + +func (o DatabaseKafkaConfigArrayOutput) ToDatabaseKafkaConfigArrayOutputWithContext(ctx context.Context) DatabaseKafkaConfigArrayOutput { + return o +} + +func (o DatabaseKafkaConfigArrayOutput) Index(i pulumi.IntInput) DatabaseKafkaConfigOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *DatabaseKafkaConfig { + return vs[0].([]*DatabaseKafkaConfig)[vs[1].(int)] + }).(DatabaseKafkaConfigOutput) +} + +type DatabaseKafkaConfigMapOutput struct{ *pulumi.OutputState } + +func (DatabaseKafkaConfigMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*DatabaseKafkaConfig)(nil)).Elem() +} + +func (o DatabaseKafkaConfigMapOutput) ToDatabaseKafkaConfigMapOutput() DatabaseKafkaConfigMapOutput { + return o +} + +func (o DatabaseKafkaConfigMapOutput) ToDatabaseKafkaConfigMapOutputWithContext(ctx context.Context) DatabaseKafkaConfigMapOutput { + return o +} + +func (o DatabaseKafkaConfigMapOutput) MapIndex(k pulumi.StringInput) DatabaseKafkaConfigOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *DatabaseKafkaConfig { + return vs[0].(map[string]*DatabaseKafkaConfig)[vs[1].(string)] + }).(DatabaseKafkaConfigOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*DatabaseKafkaConfigInput)(nil)).Elem(), &DatabaseKafkaConfig{}) + pulumi.RegisterInputType(reflect.TypeOf((*DatabaseKafkaConfigArrayInput)(nil)).Elem(), DatabaseKafkaConfigArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*DatabaseKafkaConfigMapInput)(nil)).Elem(), DatabaseKafkaConfigMap{}) + pulumi.RegisterOutputType(DatabaseKafkaConfigOutput{}) + pulumi.RegisterOutputType(DatabaseKafkaConfigArrayOutput{}) + pulumi.RegisterOutputType(DatabaseKafkaConfigMapOutput{}) +} diff --git a/sdk/go/digitalocean/databaseMongodbConfig.go b/sdk/go/digitalocean/databaseMongodbConfig.go new file mode 100644 index 00000000..dc313959 --- /dev/null +++ b/sdk/go/digitalocean/databaseMongodbConfig.go @@ -0,0 +1,347 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package digitalocean + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Provides a virtual resource that can be used to change advanced configuration +// options for a DigitalOcean managed MongoDB database cluster. +// +// > **Note** MongoDB configurations are only removed from state when destroyed. The remote configuration is not unset. 
+// +// ## Example Usage +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// exampleDatabaseCluster, err := digitalocean.NewDatabaseCluster(ctx, "example", &digitalocean.DatabaseClusterArgs{ +// Name: pulumi.String("example-mongodb-cluster"), +// Engine: pulumi.String("mongodb"), +// Version: pulumi.String("7"), +// Size: pulumi.String(digitalocean.DatabaseSlug_DB_1VPCU1GB), +// Region: pulumi.String(digitalocean.RegionNYC3), +// NodeCount: pulumi.Int(1), +// }) +// if err != nil { +// return err +// } +// _, err = digitalocean.NewDatabaseMongodbConfig(ctx, "example", &digitalocean.DatabaseMongodbConfigArgs{ +// ClusterId: exampleDatabaseCluster.ID(), +// DefaultReadConcern: pulumi.String("majority"), +// DefaultWriteConcern: pulumi.String("majority"), +// TransactionLifetimeLimitSeconds: pulumi.Int(100), +// SlowOpThresholdMs: pulumi.Int(100), +// Verbosity: pulumi.Int(3), +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ## Import +// +// A MongoDB database cluster's configuration can be imported using the `id` the parent cluster, e.g. +// +// ```sh +// $ pulumi import digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig example 4b62829a-9c42-465b-aaa3-84051048e712 +// ``` +type DatabaseMongodbConfig struct { + pulumi.CustomResourceState + + // The ID of the target MongoDB cluster. + ClusterId pulumi.StringOutput `pulumi:"clusterId"` + // Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + DefaultReadConcern pulumi.StringOutput `pulumi:"defaultReadConcern"` + // Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + DefaultWriteConcern pulumi.StringOutput `pulumi:"defaultWriteConcern"` + // Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + SlowOpThresholdMs pulumi.IntOutput `pulumi:"slowOpThresholdMs"` + // Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). 
+ TransactionLifetimeLimitSeconds pulumi.IntOutput `pulumi:"transactionLifetimeLimitSeconds"` + // The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + Verbosity pulumi.IntOutput `pulumi:"verbosity"` +} + +// NewDatabaseMongodbConfig registers a new resource with the given unique name, arguments, and options. +func NewDatabaseMongodbConfig(ctx *pulumi.Context, + name string, args *DatabaseMongodbConfigArgs, opts ...pulumi.ResourceOption) (*DatabaseMongodbConfig, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.ClusterId == nil { + return nil, errors.New("invalid value for required argument 'ClusterId'") + } + opts = internal.PkgResourceDefaultOpts(opts) + var resource DatabaseMongodbConfig + err := ctx.RegisterResource("digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetDatabaseMongodbConfig gets an existing DatabaseMongodbConfig resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetDatabaseMongodbConfig(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *DatabaseMongodbConfigState, opts ...pulumi.ResourceOption) (*DatabaseMongodbConfig, error) { + var resource DatabaseMongodbConfig + err := ctx.ReadResource("digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering DatabaseMongodbConfig resources. +type databaseMongodbConfigState struct { + // The ID of the target MongoDB cluster. + ClusterId *string `pulumi:"clusterId"` + // Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + DefaultReadConcern *string `pulumi:"defaultReadConcern"` + // Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + DefaultWriteConcern *string `pulumi:"defaultWriteConcern"` + // Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). 
+ SlowOpThresholdMs *int `pulumi:"slowOpThresholdMs"` + // Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + TransactionLifetimeLimitSeconds *int `pulumi:"transactionLifetimeLimitSeconds"` + // The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + Verbosity *int `pulumi:"verbosity"` +} + +type DatabaseMongodbConfigState struct { + // The ID of the target MongoDB cluster. + ClusterId pulumi.StringPtrInput + // Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + DefaultReadConcern pulumi.StringPtrInput + // Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + DefaultWriteConcern pulumi.StringPtrInput + // Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + SlowOpThresholdMs pulumi.IntPtrInput + // Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + TransactionLifetimeLimitSeconds pulumi.IntPtrInput + // The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). 
+ Verbosity pulumi.IntPtrInput +} + +func (DatabaseMongodbConfigState) ElementType() reflect.Type { + return reflect.TypeOf((*databaseMongodbConfigState)(nil)).Elem() +} + +type databaseMongodbConfigArgs struct { + // The ID of the target MongoDB cluster. + ClusterId string `pulumi:"clusterId"` + // Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + DefaultReadConcern *string `pulumi:"defaultReadConcern"` + // Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + DefaultWriteConcern *string `pulumi:"defaultWriteConcern"` + // Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + SlowOpThresholdMs *int `pulumi:"slowOpThresholdMs"` + // Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + TransactionLifetimeLimitSeconds *int `pulumi:"transactionLifetimeLimitSeconds"` + // The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + Verbosity *int `pulumi:"verbosity"` +} + +// The set of arguments for constructing a DatabaseMongodbConfig resource. +type DatabaseMongodbConfigArgs struct { + // The ID of the target MongoDB cluster. + ClusterId pulumi.StringInput + // Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + DefaultReadConcern pulumi.StringPtrInput + // Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. 
Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + DefaultWriteConcern pulumi.StringPtrInput + // Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + SlowOpThresholdMs pulumi.IntPtrInput + // Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + TransactionLifetimeLimitSeconds pulumi.IntPtrInput + // The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + Verbosity pulumi.IntPtrInput +} + +func (DatabaseMongodbConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*databaseMongodbConfigArgs)(nil)).Elem() +} + +type DatabaseMongodbConfigInput interface { + pulumi.Input + + ToDatabaseMongodbConfigOutput() DatabaseMongodbConfigOutput + ToDatabaseMongodbConfigOutputWithContext(ctx context.Context) DatabaseMongodbConfigOutput +} + +func (*DatabaseMongodbConfig) ElementType() reflect.Type { + return reflect.TypeOf((**DatabaseMongodbConfig)(nil)).Elem() +} + +func (i *DatabaseMongodbConfig) ToDatabaseMongodbConfigOutput() DatabaseMongodbConfigOutput { + return i.ToDatabaseMongodbConfigOutputWithContext(context.Background()) +} + +func (i *DatabaseMongodbConfig) ToDatabaseMongodbConfigOutputWithContext(ctx context.Context) DatabaseMongodbConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(DatabaseMongodbConfigOutput) +} + +// DatabaseMongodbConfigArrayInput is an input type that accepts DatabaseMongodbConfigArray and DatabaseMongodbConfigArrayOutput values. 
+// You can construct a concrete instance of `DatabaseMongodbConfigArrayInput` via: +// +// DatabaseMongodbConfigArray{ DatabaseMongodbConfigArgs{...} } +type DatabaseMongodbConfigArrayInput interface { + pulumi.Input + + ToDatabaseMongodbConfigArrayOutput() DatabaseMongodbConfigArrayOutput + ToDatabaseMongodbConfigArrayOutputWithContext(context.Context) DatabaseMongodbConfigArrayOutput +} + +type DatabaseMongodbConfigArray []DatabaseMongodbConfigInput + +func (DatabaseMongodbConfigArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*DatabaseMongodbConfig)(nil)).Elem() +} + +func (i DatabaseMongodbConfigArray) ToDatabaseMongodbConfigArrayOutput() DatabaseMongodbConfigArrayOutput { + return i.ToDatabaseMongodbConfigArrayOutputWithContext(context.Background()) +} + +func (i DatabaseMongodbConfigArray) ToDatabaseMongodbConfigArrayOutputWithContext(ctx context.Context) DatabaseMongodbConfigArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(DatabaseMongodbConfigArrayOutput) +} + +// DatabaseMongodbConfigMapInput is an input type that accepts DatabaseMongodbConfigMap and DatabaseMongodbConfigMapOutput values. +// You can construct a concrete instance of `DatabaseMongodbConfigMapInput` via: +// +// DatabaseMongodbConfigMap{ "key": DatabaseMongodbConfigArgs{...} } +type DatabaseMongodbConfigMapInput interface { + pulumi.Input + + ToDatabaseMongodbConfigMapOutput() DatabaseMongodbConfigMapOutput + ToDatabaseMongodbConfigMapOutputWithContext(context.Context) DatabaseMongodbConfigMapOutput +} + +type DatabaseMongodbConfigMap map[string]DatabaseMongodbConfigInput + +func (DatabaseMongodbConfigMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*DatabaseMongodbConfig)(nil)).Elem() +} + +func (i DatabaseMongodbConfigMap) ToDatabaseMongodbConfigMapOutput() DatabaseMongodbConfigMapOutput { + return i.ToDatabaseMongodbConfigMapOutputWithContext(context.Background()) +} + +func (i DatabaseMongodbConfigMap) ToDatabaseMongodbConfigMapOutputWithContext(ctx context.Context) DatabaseMongodbConfigMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(DatabaseMongodbConfigMapOutput) +} + +type DatabaseMongodbConfigOutput struct{ *pulumi.OutputState } + +func (DatabaseMongodbConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DatabaseMongodbConfig)(nil)).Elem() +} + +func (o DatabaseMongodbConfigOutput) ToDatabaseMongodbConfigOutput() DatabaseMongodbConfigOutput { + return o +} + +func (o DatabaseMongodbConfigOutput) ToDatabaseMongodbConfigOutputWithContext(ctx context.Context) DatabaseMongodbConfigOutput { + return o +} + +// The ID of the target MongoDB cluster. +func (o DatabaseMongodbConfigOutput) ClusterId() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseMongodbConfig) pulumi.StringOutput { return v.ClusterId }).(pulumi.StringOutput) +} + +// Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). +func (o DatabaseMongodbConfigOutput) DefaultReadConcern() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseMongodbConfig) pulumi.StringOutput { return v.DefaultReadConcern }).(pulumi.StringOutput) +} + +// Describes the level of acknowledgment requested from MongoDB for write operations clusters. 
This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). +func (o DatabaseMongodbConfigOutput) DefaultWriteConcern() pulumi.StringOutput { + return o.ApplyT(func(v *DatabaseMongodbConfig) pulumi.StringOutput { return v.DefaultWriteConcern }).(pulumi.StringOutput) +} + +// Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). +func (o DatabaseMongodbConfigOutput) SlowOpThresholdMs() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseMongodbConfig) pulumi.IntOutput { return v.SlowOpThresholdMs }).(pulumi.IntOutput) +} + +// Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). +func (o DatabaseMongodbConfigOutput) TransactionLifetimeLimitSeconds() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseMongodbConfig) pulumi.IntOutput { return v.TransactionLifetimeLimitSeconds }).(pulumi.IntOutput) +} + +// The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). 
+func (o DatabaseMongodbConfigOutput) Verbosity() pulumi.IntOutput { + return o.ApplyT(func(v *DatabaseMongodbConfig) pulumi.IntOutput { return v.Verbosity }).(pulumi.IntOutput) +} + +type DatabaseMongodbConfigArrayOutput struct{ *pulumi.OutputState } + +func (DatabaseMongodbConfigArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*DatabaseMongodbConfig)(nil)).Elem() +} + +func (o DatabaseMongodbConfigArrayOutput) ToDatabaseMongodbConfigArrayOutput() DatabaseMongodbConfigArrayOutput { + return o +} + +func (o DatabaseMongodbConfigArrayOutput) ToDatabaseMongodbConfigArrayOutputWithContext(ctx context.Context) DatabaseMongodbConfigArrayOutput { + return o +} + +func (o DatabaseMongodbConfigArrayOutput) Index(i pulumi.IntInput) DatabaseMongodbConfigOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *DatabaseMongodbConfig { + return vs[0].([]*DatabaseMongodbConfig)[vs[1].(int)] + }).(DatabaseMongodbConfigOutput) +} + +type DatabaseMongodbConfigMapOutput struct{ *pulumi.OutputState } + +func (DatabaseMongodbConfigMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*DatabaseMongodbConfig)(nil)).Elem() +} + +func (o DatabaseMongodbConfigMapOutput) ToDatabaseMongodbConfigMapOutput() DatabaseMongodbConfigMapOutput { + return o +} + +func (o DatabaseMongodbConfigMapOutput) ToDatabaseMongodbConfigMapOutputWithContext(ctx context.Context) DatabaseMongodbConfigMapOutput { + return o +} + +func (o DatabaseMongodbConfigMapOutput) MapIndex(k pulumi.StringInput) DatabaseMongodbConfigOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *DatabaseMongodbConfig { + return vs[0].(map[string]*DatabaseMongodbConfig)[vs[1].(string)] + }).(DatabaseMongodbConfigOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*DatabaseMongodbConfigInput)(nil)).Elem(), &DatabaseMongodbConfig{}) + pulumi.RegisterInputType(reflect.TypeOf((*DatabaseMongodbConfigArrayInput)(nil)).Elem(), DatabaseMongodbConfigArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*DatabaseMongodbConfigMapInput)(nil)).Elem(), DatabaseMongodbConfigMap{}) + pulumi.RegisterOutputType(DatabaseMongodbConfigOutput{}) + pulumi.RegisterOutputType(DatabaseMongodbConfigArrayOutput{}) + pulumi.RegisterOutputType(DatabaseMongodbConfigMapOutput{}) +} diff --git a/sdk/go/digitalocean/init.go b/sdk/go/digitalocean/init.go index 208ca40a..67b8f25b 100644 --- a/sdk/go/digitalocean/init.go +++ b/sdk/go/digitalocean/init.go @@ -41,8 +41,12 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &DatabaseDb{} case "digitalocean:index/databaseFirewall:DatabaseFirewall": r = &DatabaseFirewall{} + case "digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig": + r = &DatabaseKafkaConfig{} case "digitalocean:index/databaseKafkaTopic:DatabaseKafkaTopic": r = &DatabaseKafkaTopic{} + case "digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig": + r = &DatabaseMongodbConfig{} case "digitalocean:index/databaseMysqlConfig:DatabaseMysqlConfig": r = &DatabaseMysqlConfig{} case "digitalocean:index/databasePostgresqlConfig:DatabasePostgresqlConfig": @@ -190,11 +194,21 @@ func init() { "index/databaseFirewall", &module{version}, ) + pulumi.RegisterResourceModule( + "digitalocean", + "index/databaseKafkaConfig", + &module{version}, + ) pulumi.RegisterResourceModule( "digitalocean", "index/databaseKafkaTopic", &module{version}, ) + pulumi.RegisterResourceModule( + "digitalocean", + "index/databaseMongodbConfig", + &module{version}, + ) 
pulumi.RegisterResourceModule( "digitalocean", "index/databaseMysqlConfig", diff --git a/sdk/go/digitalocean/pulumiTypes.go b/sdk/go/digitalocean/pulumiTypes.go index 419ef201..d336160a 100644 --- a/sdk/go/digitalocean/pulumiTypes.go +++ b/sdk/go/digitalocean/pulumiTypes.go @@ -15175,7 +15175,8 @@ type DatabaseUserSetting struct { // A set of ACLs (Access Control Lists) specifying permission on topics with a Kafka cluster. The properties of an individual ACL are described below: // // An individual ACL includes the following: - Acls []DatabaseUserSettingAcl `pulumi:"acls"` + Acls []DatabaseUserSettingAcl `pulumi:"acls"` + OpensearchAcls []DatabaseUserSettingOpensearchAcl `pulumi:"opensearchAcls"` } // DatabaseUserSettingInput is an input type that accepts DatabaseUserSettingArgs and DatabaseUserSettingOutput values. @@ -15193,7 +15194,8 @@ type DatabaseUserSettingArgs struct { // A set of ACLs (Access Control Lists) specifying permission on topics with a Kafka cluster. The properties of an individual ACL are described below: // // An individual ACL includes the following: - Acls DatabaseUserSettingAclArrayInput `pulumi:"acls"` + Acls DatabaseUserSettingAclArrayInput `pulumi:"acls"` + OpensearchAcls DatabaseUserSettingOpensearchAclArrayInput `pulumi:"opensearchAcls"` } func (DatabaseUserSettingArgs) ElementType() reflect.Type { @@ -15254,6 +15256,10 @@ func (o DatabaseUserSettingOutput) Acls() DatabaseUserSettingAclArrayOutput { return o.ApplyT(func(v DatabaseUserSetting) []DatabaseUserSettingAcl { return v.Acls }).(DatabaseUserSettingAclArrayOutput) } +func (o DatabaseUserSettingOutput) OpensearchAcls() DatabaseUserSettingOpensearchAclArrayOutput { + return o.ApplyT(func(v DatabaseUserSetting) []DatabaseUserSettingOpensearchAcl { return v.OpensearchAcls }).(DatabaseUserSettingOpensearchAclArrayOutput) +} + type DatabaseUserSettingArrayOutput struct{ *pulumi.OutputState } func (DatabaseUserSettingArrayOutput) ElementType() reflect.Type { @@ -15389,6 +15395,109 @@ func (o DatabaseUserSettingAclArrayOutput) Index(i pulumi.IntInput) DatabaseUser }).(DatabaseUserSettingAclOutput) } +type DatabaseUserSettingOpensearchAcl struct { + Index string `pulumi:"index"` + // The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + Permission string `pulumi:"permission"` +} + +// DatabaseUserSettingOpensearchAclInput is an input type that accepts DatabaseUserSettingOpensearchAclArgs and DatabaseUserSettingOpensearchAclOutput values. +// You can construct a concrete instance of `DatabaseUserSettingOpensearchAclInput` via: +// +// DatabaseUserSettingOpensearchAclArgs{...} +type DatabaseUserSettingOpensearchAclInput interface { + pulumi.Input + + ToDatabaseUserSettingOpensearchAclOutput() DatabaseUserSettingOpensearchAclOutput + ToDatabaseUserSettingOpensearchAclOutputWithContext(context.Context) DatabaseUserSettingOpensearchAclOutput +} + +type DatabaseUserSettingOpensearchAclArgs struct { + Index pulumi.StringInput `pulumi:"index"` + // The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. 
"produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + Permission pulumi.StringInput `pulumi:"permission"` +} + +func (DatabaseUserSettingOpensearchAclArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DatabaseUserSettingOpensearchAcl)(nil)).Elem() +} + +func (i DatabaseUserSettingOpensearchAclArgs) ToDatabaseUserSettingOpensearchAclOutput() DatabaseUserSettingOpensearchAclOutput { + return i.ToDatabaseUserSettingOpensearchAclOutputWithContext(context.Background()) +} + +func (i DatabaseUserSettingOpensearchAclArgs) ToDatabaseUserSettingOpensearchAclOutputWithContext(ctx context.Context) DatabaseUserSettingOpensearchAclOutput { + return pulumi.ToOutputWithContext(ctx, i).(DatabaseUserSettingOpensearchAclOutput) +} + +// DatabaseUserSettingOpensearchAclArrayInput is an input type that accepts DatabaseUserSettingOpensearchAclArray and DatabaseUserSettingOpensearchAclArrayOutput values. +// You can construct a concrete instance of `DatabaseUserSettingOpensearchAclArrayInput` via: +// +// DatabaseUserSettingOpensearchAclArray{ DatabaseUserSettingOpensearchAclArgs{...} } +type DatabaseUserSettingOpensearchAclArrayInput interface { + pulumi.Input + + ToDatabaseUserSettingOpensearchAclArrayOutput() DatabaseUserSettingOpensearchAclArrayOutput + ToDatabaseUserSettingOpensearchAclArrayOutputWithContext(context.Context) DatabaseUserSettingOpensearchAclArrayOutput +} + +type DatabaseUserSettingOpensearchAclArray []DatabaseUserSettingOpensearchAclInput + +func (DatabaseUserSettingOpensearchAclArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]DatabaseUserSettingOpensearchAcl)(nil)).Elem() +} + +func (i DatabaseUserSettingOpensearchAclArray) ToDatabaseUserSettingOpensearchAclArrayOutput() DatabaseUserSettingOpensearchAclArrayOutput { + return i.ToDatabaseUserSettingOpensearchAclArrayOutputWithContext(context.Background()) +} + +func (i DatabaseUserSettingOpensearchAclArray) ToDatabaseUserSettingOpensearchAclArrayOutputWithContext(ctx context.Context) DatabaseUserSettingOpensearchAclArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(DatabaseUserSettingOpensearchAclArrayOutput) +} + +type DatabaseUserSettingOpensearchAclOutput struct{ *pulumi.OutputState } + +func (DatabaseUserSettingOpensearchAclOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DatabaseUserSettingOpensearchAcl)(nil)).Elem() +} + +func (o DatabaseUserSettingOpensearchAclOutput) ToDatabaseUserSettingOpensearchAclOutput() DatabaseUserSettingOpensearchAclOutput { + return o +} + +func (o DatabaseUserSettingOpensearchAclOutput) ToDatabaseUserSettingOpensearchAclOutputWithContext(ctx context.Context) DatabaseUserSettingOpensearchAclOutput { + return o +} + +func (o DatabaseUserSettingOpensearchAclOutput) Index() pulumi.StringOutput { + return o.ApplyT(func(v DatabaseUserSettingOpensearchAcl) string { return v.Index }).(pulumi.StringOutput) +} + +// The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. 
+func (o DatabaseUserSettingOpensearchAclOutput) Permission() pulumi.StringOutput { + return o.ApplyT(func(v DatabaseUserSettingOpensearchAcl) string { return v.Permission }).(pulumi.StringOutput) +} + +type DatabaseUserSettingOpensearchAclArrayOutput struct{ *pulumi.OutputState } + +func (DatabaseUserSettingOpensearchAclArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]DatabaseUserSettingOpensearchAcl)(nil)).Elem() +} + +func (o DatabaseUserSettingOpensearchAclArrayOutput) ToDatabaseUserSettingOpensearchAclArrayOutput() DatabaseUserSettingOpensearchAclArrayOutput { + return o +} + +func (o DatabaseUserSettingOpensearchAclArrayOutput) ToDatabaseUserSettingOpensearchAclArrayOutputWithContext(ctx context.Context) DatabaseUserSettingOpensearchAclArrayOutput { + return o +} + +func (o DatabaseUserSettingOpensearchAclArrayOutput) Index(i pulumi.IntInput) DatabaseUserSettingOpensearchAclOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) DatabaseUserSettingOpensearchAcl { + return vs[0].([]DatabaseUserSettingOpensearchAcl)[vs[1].(int)] + }).(DatabaseUserSettingOpensearchAclOutput) +} + type FirewallInboundRule struct { // The ports on which traffic will be allowed // specified as a string containing a single port, a range (e.g. "8000-9000"), @@ -39912,6 +40021,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*DatabaseUserSettingArrayInput)(nil)).Elem(), DatabaseUserSettingArray{}) pulumi.RegisterInputType(reflect.TypeOf((*DatabaseUserSettingAclInput)(nil)).Elem(), DatabaseUserSettingAclArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DatabaseUserSettingAclArrayInput)(nil)).Elem(), DatabaseUserSettingAclArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*DatabaseUserSettingOpensearchAclInput)(nil)).Elem(), DatabaseUserSettingOpensearchAclArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DatabaseUserSettingOpensearchAclArrayInput)(nil)).Elem(), DatabaseUserSettingOpensearchAclArray{}) pulumi.RegisterInputType(reflect.TypeOf((*FirewallInboundRuleInput)(nil)).Elem(), FirewallInboundRuleArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*FirewallInboundRuleArrayInput)(nil)).Elem(), FirewallInboundRuleArray{}) pulumi.RegisterInputType(reflect.TypeOf((*FirewallOutboundRuleInput)(nil)).Elem(), FirewallOutboundRuleArgs{}) @@ -40421,6 +40532,8 @@ func init() { pulumi.RegisterOutputType(DatabaseUserSettingArrayOutput{}) pulumi.RegisterOutputType(DatabaseUserSettingAclOutput{}) pulumi.RegisterOutputType(DatabaseUserSettingAclArrayOutput{}) + pulumi.RegisterOutputType(DatabaseUserSettingOpensearchAclOutput{}) + pulumi.RegisterOutputType(DatabaseUserSettingOpensearchAclArrayOutput{}) pulumi.RegisterOutputType(FirewallInboundRuleOutput{}) pulumi.RegisterOutputType(FirewallInboundRuleArrayOutput{}) pulumi.RegisterOutputType(FirewallOutboundRuleOutput{}) diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseKafkaConfig.java b/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseKafkaConfig.java new file mode 100644 index 00000000..1ff776c9 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseKafkaConfig.java @@ -0,0 +1,348 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.digitalocean; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Export; +import com.pulumi.core.annotations.ResourceType; +import com.pulumi.core.internal.Codegen; +import com.pulumi.digitalocean.DatabaseKafkaConfigArgs; +import com.pulumi.digitalocean.Utilities; +import com.pulumi.digitalocean.inputs.DatabaseKafkaConfigState; +import java.lang.Boolean; +import java.lang.Integer; +import java.lang.String; +import javax.annotation.Nullable; + +/** + * Provides a virtual resource that can be used to change advanced configuration + * options for a DigitalOcean managed Kafka database cluster. + * + * > **Note** Kafka configurations are only removed from state when destroyed. The remote configuration is not unset. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + * <!--End PulumiCodeChooser --> + * + * ## Import + * + * A Kafka database cluster's configuration can be imported using the `id` the parent cluster, e.g. + * + * ```sh + * $ pulumi import digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + * ``` + * + */ +@ResourceType(type="digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig") +public class DatabaseKafkaConfig extends com.pulumi.resources.CustomResource { + /** + * Enable auto creation of topics. + * + */ + @Export(name="autoCreateTopicsEnable", refs={Boolean.class}, tree="[0]") + private Output autoCreateTopicsEnable; + + /** + * @return Enable auto creation of topics. + * + */ + public Output autoCreateTopicsEnable() { + return this.autoCreateTopicsEnable; + } + /** + * The ID of the target Kafka cluster. + * + */ + @Export(name="clusterId", refs={String.class}, tree="[0]") + private Output clusterId; + + /** + * @return The ID of the target Kafka cluster. + * + */ + public Output clusterId() { + return this.clusterId; + } + /** + * The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + */ + @Export(name="groupInitialRebalanceDelayMs", refs={Integer.class}, tree="[0]") + private Output groupInitialRebalanceDelayMs; + + /** + * @return The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + */ + public Output groupInitialRebalanceDelayMs() { + return this.groupInitialRebalanceDelayMs; + } + /** + * The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + @Export(name="groupMaxSessionTimeoutMs", refs={Integer.class}, tree="[0]") + private Output groupMaxSessionTimeoutMs; + + /** + * @return The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. 
+ * + */ + public Output groupMaxSessionTimeoutMs() { + return this.groupMaxSessionTimeoutMs; + } + /** + * The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + @Export(name="groupMinSessionTimeoutMs", refs={Integer.class}, tree="[0]") + private Output groupMinSessionTimeoutMs; + + /** + * @return The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + public Output groupMinSessionTimeoutMs() { + return this.groupMinSessionTimeoutMs; + } + /** + * How long are delete records retained? + * + */ + @Export(name="logCleanerDeleteRetentionMs", refs={Integer.class}, tree="[0]") + private Output logCleanerDeleteRetentionMs; + + /** + * @return How long are delete records retained? + * + */ + public Output logCleanerDeleteRetentionMs() { + return this.logCleanerDeleteRetentionMs; + } + /** + * The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + * + */ + @Export(name="logCleanerMinCompactionLagMs", refs={String.class}, tree="[0]") + private Output logCleanerMinCompactionLagMs; + + /** + * @return The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + * + */ + public Output logCleanerMinCompactionLagMs() { + return this.logCleanerMinCompactionLagMs; + } + /** + * The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + * + */ + @Export(name="logFlushIntervalMs", refs={String.class}, tree="[0]") + private Output logFlushIntervalMs; + + /** + * @return The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + * + */ + public Output logFlushIntervalMs() { + return this.logFlushIntervalMs; + } + /** + * The interval with which Kafka adds an entry to the offset index. + * + */ + @Export(name="logIndexIntervalBytes", refs={Integer.class}, tree="[0]") + private Output logIndexIntervalBytes; + + /** + * @return The interval with which Kafka adds an entry to the offset index. + * + */ + public Output logIndexIntervalBytes() { + return this.logIndexIntervalBytes; + } + /** + * This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + * + */ + @Export(name="logMessageDownconversionEnable", refs={Boolean.class}, tree="[0]") + private Output logMessageDownconversionEnable; + + /** + * @return This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + * + */ + public Output logMessageDownconversionEnable() { + return this.logMessageDownconversionEnable; + } + /** + * The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + * + */ + @Export(name="logMessageTimestampDifferenceMaxMs", refs={String.class}, tree="[0]") + private Output logMessageTimestampDifferenceMaxMs; + + /** + * @return The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. 
+ * + */ + public Output logMessageTimestampDifferenceMaxMs() { + return this.logMessageTimestampDifferenceMaxMs; + } + /** + * Controls whether to preallocate a file when creating a new segment. + * + */ + @Export(name="logPreallocate", refs={Boolean.class}, tree="[0]") + private Output logPreallocate; + + /** + * @return Controls whether to preallocate a file when creating a new segment. + * + */ + public Output logPreallocate() { + return this.logPreallocate; + } + /** + * The maximum size of the log before deleting messages. + * + */ + @Export(name="logRetentionBytes", refs={String.class}, tree="[0]") + private Output logRetentionBytes; + + /** + * @return The maximum size of the log before deleting messages. + * + */ + public Output logRetentionBytes() { + return this.logRetentionBytes; + } + /** + * The number of hours to keep a log file before deleting it. + * + */ + @Export(name="logRetentionHours", refs={Integer.class}, tree="[0]") + private Output logRetentionHours; + + /** + * @return The number of hours to keep a log file before deleting it. + * + */ + public Output logRetentionHours() { + return this.logRetentionHours; + } + /** + * The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + * + */ + @Export(name="logRetentionMs", refs={String.class}, tree="[0]") + private Output logRetentionMs; + + /** + * @return The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + * + */ + public Output logRetentionMs() { + return this.logRetentionMs; + } + /** + * The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + * + */ + @Export(name="logRollJitterMs", refs={String.class}, tree="[0]") + private Output logRollJitterMs; + + /** + * @return The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + * + */ + public Output logRollJitterMs() { + return this.logRollJitterMs; + } + /** + * The amount of time to wait before deleting a file from the filesystem. + * + */ + @Export(name="logSegmentDeleteDelayMs", refs={Integer.class}, tree="[0]") + private Output logSegmentDeleteDelayMs; + + /** + * @return The amount of time to wait before deleting a file from the filesystem. + * + */ + public Output logSegmentDeleteDelayMs() { + return this.logSegmentDeleteDelayMs; + } + /** + * The maximum size of message that the server can receive. + * + */ + @Export(name="messageMaxBytes", refs={Integer.class}, tree="[0]") + private Output messageMaxBytes; + + /** + * @return The maximum size of message that the server can receive. + * + */ + public Output messageMaxBytes() { + return this.messageMaxBytes; + } + + /** + * + * @param name The _unique_ name of the resulting resource. + */ + public DatabaseKafkaConfig(java.lang.String name) { + this(name, DatabaseKafkaConfigArgs.Empty); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + */ + public DatabaseKafkaConfig(java.lang.String name, DatabaseKafkaConfigArgs args) { + this(name, args, null); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. 
+ * @param options A bag of options that control this resource's behavior. + */ + public DatabaseKafkaConfig(java.lang.String name, DatabaseKafkaConfigArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig", name, makeArgs(args, options), makeResourceOptions(options, Codegen.empty()), false); + } + + private DatabaseKafkaConfig(java.lang.String name, Output id, @Nullable DatabaseKafkaConfigState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig", name, state, makeResourceOptions(options, id), false); + } + + private static DatabaseKafkaConfigArgs makeArgs(DatabaseKafkaConfigArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + if (options != null && options.getUrn().isPresent()) { + return null; + } + return args == null ? DatabaseKafkaConfigArgs.Empty : args; + } + + private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) { + var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder() + .version(Utilities.getVersion()) + .build(); + return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id); + } + + /** + * Get an existing Host resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state + * @param options Optional settings to control the behavior of the CustomResource. + */ + public static DatabaseKafkaConfig get(java.lang.String name, Output id, @Nullable DatabaseKafkaConfigState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + return new DatabaseKafkaConfig(name, id, state, options); + } +} diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseKafkaConfigArgs.java b/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseKafkaConfigArgs.java new file mode 100644 index 00000000..339cbf90 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseKafkaConfigArgs.java @@ -0,0 +1,718 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.digitalocean; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.Boolean; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class DatabaseKafkaConfigArgs extends com.pulumi.resources.ResourceArgs { + + public static final DatabaseKafkaConfigArgs Empty = new DatabaseKafkaConfigArgs(); + + /** + * Enable auto creation of topics. + * + */ + @Import(name="autoCreateTopicsEnable") + private @Nullable Output autoCreateTopicsEnable; + + /** + * @return Enable auto creation of topics. + * + */ + public Optional> autoCreateTopicsEnable() { + return Optional.ofNullable(this.autoCreateTopicsEnable); + } + + /** + * The ID of the target Kafka cluster. + * + */ + @Import(name="clusterId", required=true) + private Output clusterId; + + /** + * @return The ID of the target Kafka cluster. 
+ * + */ + public Output clusterId() { + return this.clusterId; + } + + /** + * The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + */ + @Import(name="groupInitialRebalanceDelayMs") + private @Nullable Output groupInitialRebalanceDelayMs; + + /** + * @return The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + */ + public Optional> groupInitialRebalanceDelayMs() { + return Optional.ofNullable(this.groupInitialRebalanceDelayMs); + } + + /** + * The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + @Import(name="groupMaxSessionTimeoutMs") + private @Nullable Output groupMaxSessionTimeoutMs; + + /** + * @return The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + public Optional> groupMaxSessionTimeoutMs() { + return Optional.ofNullable(this.groupMaxSessionTimeoutMs); + } + + /** + * The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + @Import(name="groupMinSessionTimeoutMs") + private @Nullable Output groupMinSessionTimeoutMs; + + /** + * @return The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + public Optional> groupMinSessionTimeoutMs() { + return Optional.ofNullable(this.groupMinSessionTimeoutMs); + } + + /** + * How long are delete records retained? + * + */ + @Import(name="logCleanerDeleteRetentionMs") + private @Nullable Output logCleanerDeleteRetentionMs; + + /** + * @return How long are delete records retained? + * + */ + public Optional> logCleanerDeleteRetentionMs() { + return Optional.ofNullable(this.logCleanerDeleteRetentionMs); + } + + /** + * The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + * + */ + @Import(name="logCleanerMinCompactionLagMs") + private @Nullable Output logCleanerMinCompactionLagMs; + + /** + * @return The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + * + */ + public Optional> logCleanerMinCompactionLagMs() { + return Optional.ofNullable(this.logCleanerMinCompactionLagMs); + } + + /** + * The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. 
+ * + */ + @Import(name="logFlushIntervalMs") + private @Nullable Output logFlushIntervalMs; + + /** + * @return The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + * + */ + public Optional> logFlushIntervalMs() { + return Optional.ofNullable(this.logFlushIntervalMs); + } + + /** + * The interval with which Kafka adds an entry to the offset index. + * + */ + @Import(name="logIndexIntervalBytes") + private @Nullable Output logIndexIntervalBytes; + + /** + * @return The interval with which Kafka adds an entry to the offset index. + * + */ + public Optional> logIndexIntervalBytes() { + return Optional.ofNullable(this.logIndexIntervalBytes); + } + + /** + * This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + * + */ + @Import(name="logMessageDownconversionEnable") + private @Nullable Output logMessageDownconversionEnable; + + /** + * @return This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + * + */ + public Optional> logMessageDownconversionEnable() { + return Optional.ofNullable(this.logMessageDownconversionEnable); + } + + /** + * The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + * + */ + @Import(name="logMessageTimestampDifferenceMaxMs") + private @Nullable Output logMessageTimestampDifferenceMaxMs; + + /** + * @return The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + * + */ + public Optional> logMessageTimestampDifferenceMaxMs() { + return Optional.ofNullable(this.logMessageTimestampDifferenceMaxMs); + } + + /** + * Controls whether to preallocate a file when creating a new segment. + * + */ + @Import(name="logPreallocate") + private @Nullable Output logPreallocate; + + /** + * @return Controls whether to preallocate a file when creating a new segment. + * + */ + public Optional> logPreallocate() { + return Optional.ofNullable(this.logPreallocate); + } + + /** + * The maximum size of the log before deleting messages. + * + */ + @Import(name="logRetentionBytes") + private @Nullable Output logRetentionBytes; + + /** + * @return The maximum size of the log before deleting messages. + * + */ + public Optional> logRetentionBytes() { + return Optional.ofNullable(this.logRetentionBytes); + } + + /** + * The number of hours to keep a log file before deleting it. + * + */ + @Import(name="logRetentionHours") + private @Nullable Output logRetentionHours; + + /** + * @return The number of hours to keep a log file before deleting it. + * + */ + public Optional> logRetentionHours() { + return Optional.ofNullable(this.logRetentionHours); + } + + /** + * The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + * + */ + @Import(name="logRetentionMs") + private @Nullable Output logRetentionMs; + + /** + * @return The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + * + */ + public Optional> logRetentionMs() { + return Optional.ofNullable(this.logRetentionMs); + } + + /** + * The maximum jitter to subtract from logRollTimeMillis (in milliseconds). 
If not set, the value in log.roll.jitter.hours is used. + * + */ + @Import(name="logRollJitterMs") + private @Nullable Output logRollJitterMs; + + /** + * @return The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + * + */ + public Optional> logRollJitterMs() { + return Optional.ofNullable(this.logRollJitterMs); + } + + /** + * The amount of time to wait before deleting a file from the filesystem. + * + */ + @Import(name="logSegmentDeleteDelayMs") + private @Nullable Output logSegmentDeleteDelayMs; + + /** + * @return The amount of time to wait before deleting a file from the filesystem. + * + */ + public Optional> logSegmentDeleteDelayMs() { + return Optional.ofNullable(this.logSegmentDeleteDelayMs); + } + + /** + * The maximum size of message that the server can receive. + * + */ + @Import(name="messageMaxBytes") + private @Nullable Output messageMaxBytes; + + /** + * @return The maximum size of message that the server can receive. + * + */ + public Optional> messageMaxBytes() { + return Optional.ofNullable(this.messageMaxBytes); + } + + private DatabaseKafkaConfigArgs() {} + + private DatabaseKafkaConfigArgs(DatabaseKafkaConfigArgs $) { + this.autoCreateTopicsEnable = $.autoCreateTopicsEnable; + this.clusterId = $.clusterId; + this.groupInitialRebalanceDelayMs = $.groupInitialRebalanceDelayMs; + this.groupMaxSessionTimeoutMs = $.groupMaxSessionTimeoutMs; + this.groupMinSessionTimeoutMs = $.groupMinSessionTimeoutMs; + this.logCleanerDeleteRetentionMs = $.logCleanerDeleteRetentionMs; + this.logCleanerMinCompactionLagMs = $.logCleanerMinCompactionLagMs; + this.logFlushIntervalMs = $.logFlushIntervalMs; + this.logIndexIntervalBytes = $.logIndexIntervalBytes; + this.logMessageDownconversionEnable = $.logMessageDownconversionEnable; + this.logMessageTimestampDifferenceMaxMs = $.logMessageTimestampDifferenceMaxMs; + this.logPreallocate = $.logPreallocate; + this.logRetentionBytes = $.logRetentionBytes; + this.logRetentionHours = $.logRetentionHours; + this.logRetentionMs = $.logRetentionMs; + this.logRollJitterMs = $.logRollJitterMs; + this.logSegmentDeleteDelayMs = $.logSegmentDeleteDelayMs; + this.messageMaxBytes = $.messageMaxBytes; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(DatabaseKafkaConfigArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private DatabaseKafkaConfigArgs $; + + public Builder() { + $ = new DatabaseKafkaConfigArgs(); + } + + public Builder(DatabaseKafkaConfigArgs defaults) { + $ = new DatabaseKafkaConfigArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param autoCreateTopicsEnable Enable auto creation of topics. + * + * @return builder + * + */ + public Builder autoCreateTopicsEnable(@Nullable Output autoCreateTopicsEnable) { + $.autoCreateTopicsEnable = autoCreateTopicsEnable; + return this; + } + + /** + * @param autoCreateTopicsEnable Enable auto creation of topics. + * + * @return builder + * + */ + public Builder autoCreateTopicsEnable(Boolean autoCreateTopicsEnable) { + return autoCreateTopicsEnable(Output.of(autoCreateTopicsEnable)); + } + + /** + * @param clusterId The ID of the target Kafka cluster. + * + * @return builder + * + */ + public Builder clusterId(Output clusterId) { + $.clusterId = clusterId; + return this; + } + + /** + * @param clusterId The ID of the target Kafka cluster. 
+ * + * @return builder + * + */ + public Builder clusterId(String clusterId) { + return clusterId(Output.of(clusterId)); + } + + /** + * @param groupInitialRebalanceDelayMs The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + * @return builder + * + */ + public Builder groupInitialRebalanceDelayMs(@Nullable Output groupInitialRebalanceDelayMs) { + $.groupInitialRebalanceDelayMs = groupInitialRebalanceDelayMs; + return this; + } + + /** + * @param groupInitialRebalanceDelayMs The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + * @return builder + * + */ + public Builder groupInitialRebalanceDelayMs(Integer groupInitialRebalanceDelayMs) { + return groupInitialRebalanceDelayMs(Output.of(groupInitialRebalanceDelayMs)); + } + + /** + * @param groupMaxSessionTimeoutMs The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + * @return builder + * + */ + public Builder groupMaxSessionTimeoutMs(@Nullable Output groupMaxSessionTimeoutMs) { + $.groupMaxSessionTimeoutMs = groupMaxSessionTimeoutMs; + return this; + } + + /** + * @param groupMaxSessionTimeoutMs The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + * @return builder + * + */ + public Builder groupMaxSessionTimeoutMs(Integer groupMaxSessionTimeoutMs) { + return groupMaxSessionTimeoutMs(Output.of(groupMaxSessionTimeoutMs)); + } + + /** + * @param groupMinSessionTimeoutMs The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + * @return builder + * + */ + public Builder groupMinSessionTimeoutMs(@Nullable Output groupMinSessionTimeoutMs) { + $.groupMinSessionTimeoutMs = groupMinSessionTimeoutMs; + return this; + } + + /** + * @param groupMinSessionTimeoutMs The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + * @return builder + * + */ + public Builder groupMinSessionTimeoutMs(Integer groupMinSessionTimeoutMs) { + return groupMinSessionTimeoutMs(Output.of(groupMinSessionTimeoutMs)); + } + + /** + * @param logCleanerDeleteRetentionMs How long are delete records retained? + * + * @return builder + * + */ + public Builder logCleanerDeleteRetentionMs(@Nullable Output logCleanerDeleteRetentionMs) { + $.logCleanerDeleteRetentionMs = logCleanerDeleteRetentionMs; + return this; + } + + /** + * @param logCleanerDeleteRetentionMs How long are delete records retained? 
+ * + * @return builder + * + */ + public Builder logCleanerDeleteRetentionMs(Integer logCleanerDeleteRetentionMs) { + return logCleanerDeleteRetentionMs(Output.of(logCleanerDeleteRetentionMs)); + } + + /** + * @param logCleanerMinCompactionLagMs The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + * + * @return builder + * + */ + public Builder logCleanerMinCompactionLagMs(@Nullable Output logCleanerMinCompactionLagMs) { + $.logCleanerMinCompactionLagMs = logCleanerMinCompactionLagMs; + return this; + } + + /** + * @param logCleanerMinCompactionLagMs The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + * + * @return builder + * + */ + public Builder logCleanerMinCompactionLagMs(String logCleanerMinCompactionLagMs) { + return logCleanerMinCompactionLagMs(Output.of(logCleanerMinCompactionLagMs)); + } + + /** + * @param logFlushIntervalMs The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + * + * @return builder + * + */ + public Builder logFlushIntervalMs(@Nullable Output logFlushIntervalMs) { + $.logFlushIntervalMs = logFlushIntervalMs; + return this; + } + + /** + * @param logFlushIntervalMs The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + * + * @return builder + * + */ + public Builder logFlushIntervalMs(String logFlushIntervalMs) { + return logFlushIntervalMs(Output.of(logFlushIntervalMs)); + } + + /** + * @param logIndexIntervalBytes The interval with which Kafka adds an entry to the offset index. + * + * @return builder + * + */ + public Builder logIndexIntervalBytes(@Nullable Output logIndexIntervalBytes) { + $.logIndexIntervalBytes = logIndexIntervalBytes; + return this; + } + + /** + * @param logIndexIntervalBytes The interval with which Kafka adds an entry to the offset index. + * + * @return builder + * + */ + public Builder logIndexIntervalBytes(Integer logIndexIntervalBytes) { + return logIndexIntervalBytes(Output.of(logIndexIntervalBytes)); + } + + /** + * @param logMessageDownconversionEnable This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + * + * @return builder + * + */ + public Builder logMessageDownconversionEnable(@Nullable Output logMessageDownconversionEnable) { + $.logMessageDownconversionEnable = logMessageDownconversionEnable; + return this; + } + + /** + * @param logMessageDownconversionEnable This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + * + * @return builder + * + */ + public Builder logMessageDownconversionEnable(Boolean logMessageDownconversionEnable) { + return logMessageDownconversionEnable(Output.of(logMessageDownconversionEnable)); + } + + /** + * @param logMessageTimestampDifferenceMaxMs The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. 
+ * + * @return builder + * + */ + public Builder logMessageTimestampDifferenceMaxMs(@Nullable Output logMessageTimestampDifferenceMaxMs) { + $.logMessageTimestampDifferenceMaxMs = logMessageTimestampDifferenceMaxMs; + return this; + } + + /** + * @param logMessageTimestampDifferenceMaxMs The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + * + * @return builder + * + */ + public Builder logMessageTimestampDifferenceMaxMs(String logMessageTimestampDifferenceMaxMs) { + return logMessageTimestampDifferenceMaxMs(Output.of(logMessageTimestampDifferenceMaxMs)); + } + + /** + * @param logPreallocate Controls whether to preallocate a file when creating a new segment. + * + * @return builder + * + */ + public Builder logPreallocate(@Nullable Output logPreallocate) { + $.logPreallocate = logPreallocate; + return this; + } + + /** + * @param logPreallocate Controls whether to preallocate a file when creating a new segment. + * + * @return builder + * + */ + public Builder logPreallocate(Boolean logPreallocate) { + return logPreallocate(Output.of(logPreallocate)); + } + + /** + * @param logRetentionBytes The maximum size of the log before deleting messages. + * + * @return builder + * + */ + public Builder logRetentionBytes(@Nullable Output logRetentionBytes) { + $.logRetentionBytes = logRetentionBytes; + return this; + } + + /** + * @param logRetentionBytes The maximum size of the log before deleting messages. + * + * @return builder + * + */ + public Builder logRetentionBytes(String logRetentionBytes) { + return logRetentionBytes(Output.of(logRetentionBytes)); + } + + /** + * @param logRetentionHours The number of hours to keep a log file before deleting it. + * + * @return builder + * + */ + public Builder logRetentionHours(@Nullable Output logRetentionHours) { + $.logRetentionHours = logRetentionHours; + return this; + } + + /** + * @param logRetentionHours The number of hours to keep a log file before deleting it. + * + * @return builder + * + */ + public Builder logRetentionHours(Integer logRetentionHours) { + return logRetentionHours(Output.of(logRetentionHours)); + } + + /** + * @param logRetentionMs The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + * + * @return builder + * + */ + public Builder logRetentionMs(@Nullable Output logRetentionMs) { + $.logRetentionMs = logRetentionMs; + return this; + } + + /** + * @param logRetentionMs The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + * + * @return builder + * + */ + public Builder logRetentionMs(String logRetentionMs) { + return logRetentionMs(Output.of(logRetentionMs)); + } + + /** + * @param logRollJitterMs The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + * + * @return builder + * + */ + public Builder logRollJitterMs(@Nullable Output logRollJitterMs) { + $.logRollJitterMs = logRollJitterMs; + return this; + } + + /** + * @param logRollJitterMs The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. 
+ * + * @return builder + * + */ + public Builder logRollJitterMs(String logRollJitterMs) { + return logRollJitterMs(Output.of(logRollJitterMs)); + } + + /** + * @param logSegmentDeleteDelayMs The amount of time to wait before deleting a file from the filesystem. + * + * @return builder + * + */ + public Builder logSegmentDeleteDelayMs(@Nullable Output logSegmentDeleteDelayMs) { + $.logSegmentDeleteDelayMs = logSegmentDeleteDelayMs; + return this; + } + + /** + * @param logSegmentDeleteDelayMs The amount of time to wait before deleting a file from the filesystem. + * + * @return builder + * + */ + public Builder logSegmentDeleteDelayMs(Integer logSegmentDeleteDelayMs) { + return logSegmentDeleteDelayMs(Output.of(logSegmentDeleteDelayMs)); + } + + /** + * @param messageMaxBytes The maximum size of message that the server can receive. + * + * @return builder + * + */ + public Builder messageMaxBytes(@Nullable Output messageMaxBytes) { + $.messageMaxBytes = messageMaxBytes; + return this; + } + + /** + * @param messageMaxBytes The maximum size of message that the server can receive. + * + * @return builder + * + */ + public Builder messageMaxBytes(Integer messageMaxBytes) { + return messageMaxBytes(Output.of(messageMaxBytes)); + } + + public DatabaseKafkaConfigArgs build() { + if ($.clusterId == null) { + throw new MissingRequiredPropertyException("DatabaseKafkaConfigArgs", "clusterId"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseMongodbConfig.java b/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseMongodbConfig.java new file mode 100644 index 00000000..79eb024c --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseMongodbConfig.java @@ -0,0 +1,225 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.digitalocean; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Export; +import com.pulumi.core.annotations.ResourceType; +import com.pulumi.core.internal.Codegen; +import com.pulumi.digitalocean.DatabaseMongodbConfigArgs; +import com.pulumi.digitalocean.Utilities; +import com.pulumi.digitalocean.inputs.DatabaseMongodbConfigState; +import java.lang.Integer; +import java.lang.String; +import javax.annotation.Nullable; + +/** + * Provides a virtual resource that can be used to change advanced configuration + * options for a DigitalOcean managed MongoDB database cluster. + * + * > **Note** MongoDB configurations are only removed from state when destroyed. The remote configuration is not unset. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.digitalocean.DatabaseCluster;
+ * import com.pulumi.digitalocean.DatabaseClusterArgs;
+ * import com.pulumi.digitalocean.DatabaseMongodbConfig;
+ * import com.pulumi.digitalocean.DatabaseMongodbConfigArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var exampleDatabaseCluster = new DatabaseCluster("exampleDatabaseCluster", DatabaseClusterArgs.builder()
+ *             .name("example-mongodb-cluster")
+ *             .engine("mongodb")
+ *             .version("7")
+ *             .size("db-s-1vcpu-1gb")
+ *             .region("nyc3")
+ *             .nodeCount(1)
+ *             .build());
+ * 
+ *         var example = new DatabaseMongodbConfig("example", DatabaseMongodbConfigArgs.builder()
+ *             .clusterId(exampleDatabaseCluster.id())
+ *             .defaultReadConcern("majority")
+ *             .defaultWriteConcern("majority")
+ *             .transactionLifetimeLimitSeconds(100)
+ *             .slowOpThresholdMs(100)
+ *             .verbosity(3)
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * + * ## Import + * + * A MongoDB database cluster's configuration can be imported using the `id` of the parent cluster, e.g. + * + * ```sh + * $ pulumi import digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + * ``` + * + */ +@ResourceType(type="digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig") +public class DatabaseMongodbConfig extends com.pulumi.resources.CustomResource { + /** + * The ID of the target MongoDB cluster. + * + */ + @Export(name="clusterId", refs={String.class}, tree="[0]") + private Output clusterId; + + /** + * @return The ID of the target MongoDB cluster. + * + */ + public Output clusterId() { + return this.clusterId; + } + /** + * Specifies the default consistency behavior of reads from the database. Data that is returned from the query may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + * + */ + @Export(name="defaultReadConcern", refs={String.class}, tree="[0]") + private Output defaultReadConcern; + + /** + * @return Specifies the default consistency behavior of reads from the database. Data that is returned from the query may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + * + */ + public Output defaultReadConcern() { + return this.defaultReadConcern; + } + /** + * Describes the level of acknowledgment requested from MongoDB for write operations. This field can be set to either `majority` or a number `0...n`, which describes the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + */ + @Export(name="defaultWriteConcern", refs={String.class}, tree="[0]") + private Output defaultWriteConcern; + + /** + * @return Describes the level of acknowledgment requested from MongoDB for write operations. This field can be set to either `majority` or a number `0...n`, which describes the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + */ + public Output defaultWriteConcern() { + return this.defaultWriteConcern; + } + /** + * Operations that run for longer than this threshold are considered slow and are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + */ + @Export(name="slowOpThresholdMs", refs={Integer.class}, tree="[0]") + private Output slowOpThresholdMs; + + /** + * @return Operations that run for longer than this threshold are considered slow and are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node.
<em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + */ + public Output slowOpThresholdMs() { + return this.slowOpThresholdMs; + } + /** + * Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + * + */ + @Export(name="transactionLifetimeLimitSeconds", refs={Integer.class}, tree="[0]") + private Output transactionLifetimeLimitSeconds; + + /** + * @return Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + * + */ + public Output transactionLifetimeLimitSeconds() { + return this.transactionLifetimeLimitSeconds; + } + /** + * The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + */ + @Export(name="verbosity", refs={Integer.class}, tree="[0]") + private Output verbosity; + + /** + * @return The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + */ + public Output verbosity() { + return this.verbosity; + } + + /** + * + * @param name The _unique_ name of the resulting resource. + */ + public DatabaseMongodbConfig(java.lang.String name) { + this(name, DatabaseMongodbConfigArgs.Empty); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + */ + public DatabaseMongodbConfig(java.lang.String name, DatabaseMongodbConfigArgs args) { + this(name, args, null); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + * @param options A bag of options that control this resource's behavior. 
+ */ + public DatabaseMongodbConfig(java.lang.String name, DatabaseMongodbConfigArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig", name, makeArgs(args, options), makeResourceOptions(options, Codegen.empty()), false); + } + + private DatabaseMongodbConfig(java.lang.String name, Output id, @Nullable DatabaseMongodbConfigState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig", name, state, makeResourceOptions(options, id), false); + } + + private static DatabaseMongodbConfigArgs makeArgs(DatabaseMongodbConfigArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + if (options != null && options.getUrn().isPresent()) { + return null; + } + return args == null ? DatabaseMongodbConfigArgs.Empty : args; + } + + private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) { + var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder() + .version(Utilities.getVersion()) + .build(); + return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id); + } + + /** + * Get an existing DatabaseMongodbConfig resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state + * @param options Optional settings to control the behavior of the CustomResource. + */ + public static DatabaseMongodbConfig get(java.lang.String name, Output id, @Nullable DatabaseMongodbConfigState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + return new DatabaseMongodbConfig(name, id, state, options); + } +} diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseMongodbConfigArgs.java b/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseMongodbConfigArgs.java new file mode 100644 index 00000000..5ca9cb4b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/DatabaseMongodbConfigArgs.java @@ -0,0 +1,273 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.digitalocean; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class DatabaseMongodbConfigArgs extends com.pulumi.resources.ResourceArgs { + + public static final DatabaseMongodbConfigArgs Empty = new DatabaseMongodbConfigArgs(); + + /** + * The ID of the target MongoDB cluster. + * + */ + @Import(name="clusterId", required=true) + private Output clusterId; + + /** + * @return The ID of the target MongoDB cluster. + * + */ + public Output clusterId() { + return this.clusterId; + } + + /** + * Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). 
+ * + */ + @Import(name="defaultReadConcern") + private @Nullable Output defaultReadConcern; + + /** + * @return Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + * + */ + public Optional> defaultReadConcern() { + return Optional.ofNullable(this.defaultReadConcern); + } + + /** + * Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + */ + @Import(name="defaultWriteConcern") + private @Nullable Output defaultWriteConcern; + + /** + * @return Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + */ + public Optional> defaultWriteConcern() { + return Optional.ofNullable(this.defaultWriteConcern); + } + + /** + * Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + */ + @Import(name="slowOpThresholdMs") + private @Nullable Output slowOpThresholdMs; + + /** + * @return Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + */ + public Optional> slowOpThresholdMs() { + return Optional.ofNullable(this.slowOpThresholdMs); + } + + /** + * Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + * + */ + @Import(name="transactionLifetimeLimitSeconds") + private @Nullable Output transactionLifetimeLimitSeconds; + + /** + * @return Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. 
The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + * + */ + public Optional> transactionLifetimeLimitSeconds() { + return Optional.ofNullable(this.transactionLifetimeLimitSeconds); + } + + /** + * The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + */ + @Import(name="verbosity") + private @Nullable Output verbosity; + + /** + * @return The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + */ + public Optional> verbosity() { + return Optional.ofNullable(this.verbosity); + } + + private DatabaseMongodbConfigArgs() {} + + private DatabaseMongodbConfigArgs(DatabaseMongodbConfigArgs $) { + this.clusterId = $.clusterId; + this.defaultReadConcern = $.defaultReadConcern; + this.defaultWriteConcern = $.defaultWriteConcern; + this.slowOpThresholdMs = $.slowOpThresholdMs; + this.transactionLifetimeLimitSeconds = $.transactionLifetimeLimitSeconds; + this.verbosity = $.verbosity; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(DatabaseMongodbConfigArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private DatabaseMongodbConfigArgs $; + + public Builder() { + $ = new DatabaseMongodbConfigArgs(); + } + + public Builder(DatabaseMongodbConfigArgs defaults) { + $ = new DatabaseMongodbConfigArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param clusterId The ID of the target MongoDB cluster. + * + * @return builder + * + */ + public Builder clusterId(Output clusterId) { + $.clusterId = clusterId; + return this; + } + + /** + * @param clusterId The ID of the target MongoDB cluster. + * + * @return builder + * + */ + public Builder clusterId(String clusterId) { + return clusterId(Output.of(clusterId)); + } + + /** + * @param defaultReadConcern Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + * + * @return builder + * + */ + public Builder defaultReadConcern(@Nullable Output defaultReadConcern) { + $.defaultReadConcern = defaultReadConcern; + return this; + } + + /** + * @param defaultReadConcern Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. 
Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + * + * @return builder + * + */ + public Builder defaultReadConcern(String defaultReadConcern) { + return defaultReadConcern(Output.of(defaultReadConcern)); + } + + /** + * @param defaultWriteConcern Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + * @return builder + * + */ + public Builder defaultWriteConcern(@Nullable Output defaultWriteConcern) { + $.defaultWriteConcern = defaultWriteConcern; + return this; + } + + /** + * @param defaultWriteConcern Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + * @return builder + * + */ + public Builder defaultWriteConcern(String defaultWriteConcern) { + return defaultWriteConcern(Output.of(defaultWriteConcern)); + } + + /** + * @param slowOpThresholdMs Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + * @return builder + * + */ + public Builder slowOpThresholdMs(@Nullable Output slowOpThresholdMs) { + $.slowOpThresholdMs = slowOpThresholdMs; + return this; + } + + /** + * @param slowOpThresholdMs Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + * @return builder + * + */ + public Builder slowOpThresholdMs(Integer slowOpThresholdMs) { + return slowOpThresholdMs(Output.of(slowOpThresholdMs)); + } + + /** + * @param transactionLifetimeLimitSeconds Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). 
+ * + * @return builder + * + */ + public Builder transactionLifetimeLimitSeconds(@Nullable Output transactionLifetimeLimitSeconds) { + $.transactionLifetimeLimitSeconds = transactionLifetimeLimitSeconds; + return this; + } + + /** + * @param transactionLifetimeLimitSeconds Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + * + * @return builder + * + */ + public Builder transactionLifetimeLimitSeconds(Integer transactionLifetimeLimitSeconds) { + return transactionLifetimeLimitSeconds(Output.of(transactionLifetimeLimitSeconds)); + } + + /** + * @param verbosity The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + * @return builder + * + */ + public Builder verbosity(@Nullable Output verbosity) { + $.verbosity = verbosity; + return this; + } + + /** + * @param verbosity The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + * @return builder + * + */ + public Builder verbosity(Integer verbosity) { + return verbosity(Output.of(verbosity)); + } + + public DatabaseMongodbConfigArgs build() { + if ($.clusterId == null) { + throw new MissingRequiredPropertyException("DatabaseMongodbConfigArgs", "clusterId"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseKafkaConfigState.java b/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseKafkaConfigState.java new file mode 100644 index 00000000..ed93a4e3 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseKafkaConfigState.java @@ -0,0 +1,714 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.digitalocean.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Boolean; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class DatabaseKafkaConfigState extends com.pulumi.resources.ResourceArgs { + + public static final DatabaseKafkaConfigState Empty = new DatabaseKafkaConfigState(); + + /** + * Enable auto creation of topics. + * + */ + @Import(name="autoCreateTopicsEnable") + private @Nullable Output autoCreateTopicsEnable; + + /** + * @return Enable auto creation of topics. 
+ * + */ + public Optional> autoCreateTopicsEnable() { + return Optional.ofNullable(this.autoCreateTopicsEnable); + } + + /** + * The ID of the target Kafka cluster. + * + */ + @Import(name="clusterId") + private @Nullable Output clusterId; + + /** + * @return The ID of the target Kafka cluster. + * + */ + public Optional> clusterId() { + return Optional.ofNullable(this.clusterId); + } + + /** + * The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + */ + @Import(name="groupInitialRebalanceDelayMs") + private @Nullable Output groupInitialRebalanceDelayMs; + + /** + * @return The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + */ + public Optional> groupInitialRebalanceDelayMs() { + return Optional.ofNullable(this.groupInitialRebalanceDelayMs); + } + + /** + * The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + @Import(name="groupMaxSessionTimeoutMs") + private @Nullable Output groupMaxSessionTimeoutMs; + + /** + * @return The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + public Optional> groupMaxSessionTimeoutMs() { + return Optional.ofNullable(this.groupMaxSessionTimeoutMs); + } + + /** + * The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + @Import(name="groupMinSessionTimeoutMs") + private @Nullable Output groupMinSessionTimeoutMs; + + /** + * @return The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + */ + public Optional> groupMinSessionTimeoutMs() { + return Optional.ofNullable(this.groupMinSessionTimeoutMs); + } + + /** + * How long are delete records retained? + * + */ + @Import(name="logCleanerDeleteRetentionMs") + private @Nullable Output logCleanerDeleteRetentionMs; + + /** + * @return How long are delete records retained? + * + */ + public Optional> logCleanerDeleteRetentionMs() { + return Optional.ofNullable(this.logCleanerDeleteRetentionMs); + } + + /** + * The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + * + */ + @Import(name="logCleanerMinCompactionLagMs") + private @Nullable Output logCleanerMinCompactionLagMs; + + /** + * @return The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. 
+ * + */ + public Optional> logCleanerMinCompactionLagMs() { + return Optional.ofNullable(this.logCleanerMinCompactionLagMs); + } + + /** + * The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + * + */ + @Import(name="logFlushIntervalMs") + private @Nullable Output logFlushIntervalMs; + + /** + * @return The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + * + */ + public Optional> logFlushIntervalMs() { + return Optional.ofNullable(this.logFlushIntervalMs); + } + + /** + * The interval with which Kafka adds an entry to the offset index. + * + */ + @Import(name="logIndexIntervalBytes") + private @Nullable Output logIndexIntervalBytes; + + /** + * @return The interval with which Kafka adds an entry to the offset index. + * + */ + public Optional> logIndexIntervalBytes() { + return Optional.ofNullable(this.logIndexIntervalBytes); + } + + /** + * This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + * + */ + @Import(name="logMessageDownconversionEnable") + private @Nullable Output logMessageDownconversionEnable; + + /** + * @return This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + * + */ + public Optional> logMessageDownconversionEnable() { + return Optional.ofNullable(this.logMessageDownconversionEnable); + } + + /** + * The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + * + */ + @Import(name="logMessageTimestampDifferenceMaxMs") + private @Nullable Output logMessageTimestampDifferenceMaxMs; + + /** + * @return The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + * + */ + public Optional> logMessageTimestampDifferenceMaxMs() { + return Optional.ofNullable(this.logMessageTimestampDifferenceMaxMs); + } + + /** + * Controls whether to preallocate a file when creating a new segment. + * + */ + @Import(name="logPreallocate") + private @Nullable Output logPreallocate; + + /** + * @return Controls whether to preallocate a file when creating a new segment. + * + */ + public Optional> logPreallocate() { + return Optional.ofNullable(this.logPreallocate); + } + + /** + * The maximum size of the log before deleting messages. + * + */ + @Import(name="logRetentionBytes") + private @Nullable Output logRetentionBytes; + + /** + * @return The maximum size of the log before deleting messages. + * + */ + public Optional> logRetentionBytes() { + return Optional.ofNullable(this.logRetentionBytes); + } + + /** + * The number of hours to keep a log file before deleting it. + * + */ + @Import(name="logRetentionHours") + private @Nullable Output logRetentionHours; + + /** + * @return The number of hours to keep a log file before deleting it. + * + */ + public Optional> logRetentionHours() { + return Optional.ofNullable(this.logRetentionHours); + } + + /** + * The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. 
+ * + */ + @Import(name="logRetentionMs") + private @Nullable Output logRetentionMs; + + /** + * @return The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + * + */ + public Optional> logRetentionMs() { + return Optional.ofNullable(this.logRetentionMs); + } + + /** + * The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + * + */ + @Import(name="logRollJitterMs") + private @Nullable Output logRollJitterMs; + + /** + * @return The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + * + */ + public Optional> logRollJitterMs() { + return Optional.ofNullable(this.logRollJitterMs); + } + + /** + * The amount of time to wait before deleting a file from the filesystem. + * + */ + @Import(name="logSegmentDeleteDelayMs") + private @Nullable Output logSegmentDeleteDelayMs; + + /** + * @return The amount of time to wait before deleting a file from the filesystem. + * + */ + public Optional> logSegmentDeleteDelayMs() { + return Optional.ofNullable(this.logSegmentDeleteDelayMs); + } + + /** + * The maximum size of message that the server can receive. + * + */ + @Import(name="messageMaxBytes") + private @Nullable Output messageMaxBytes; + + /** + * @return The maximum size of message that the server can receive. + * + */ + public Optional> messageMaxBytes() { + return Optional.ofNullable(this.messageMaxBytes); + } + + private DatabaseKafkaConfigState() {} + + private DatabaseKafkaConfigState(DatabaseKafkaConfigState $) { + this.autoCreateTopicsEnable = $.autoCreateTopicsEnable; + this.clusterId = $.clusterId; + this.groupInitialRebalanceDelayMs = $.groupInitialRebalanceDelayMs; + this.groupMaxSessionTimeoutMs = $.groupMaxSessionTimeoutMs; + this.groupMinSessionTimeoutMs = $.groupMinSessionTimeoutMs; + this.logCleanerDeleteRetentionMs = $.logCleanerDeleteRetentionMs; + this.logCleanerMinCompactionLagMs = $.logCleanerMinCompactionLagMs; + this.logFlushIntervalMs = $.logFlushIntervalMs; + this.logIndexIntervalBytes = $.logIndexIntervalBytes; + this.logMessageDownconversionEnable = $.logMessageDownconversionEnable; + this.logMessageTimestampDifferenceMaxMs = $.logMessageTimestampDifferenceMaxMs; + this.logPreallocate = $.logPreallocate; + this.logRetentionBytes = $.logRetentionBytes; + this.logRetentionHours = $.logRetentionHours; + this.logRetentionMs = $.logRetentionMs; + this.logRollJitterMs = $.logRollJitterMs; + this.logSegmentDeleteDelayMs = $.logSegmentDeleteDelayMs; + this.messageMaxBytes = $.messageMaxBytes; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(DatabaseKafkaConfigState defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private DatabaseKafkaConfigState $; + + public Builder() { + $ = new DatabaseKafkaConfigState(); + } + + public Builder(DatabaseKafkaConfigState defaults) { + $ = new DatabaseKafkaConfigState(Objects.requireNonNull(defaults)); + } + + /** + * @param autoCreateTopicsEnable Enable auto creation of topics. + * + * @return builder + * + */ + public Builder autoCreateTopicsEnable(@Nullable Output autoCreateTopicsEnable) { + $.autoCreateTopicsEnable = autoCreateTopicsEnable; + return this; + } + + /** + * @param autoCreateTopicsEnable Enable auto creation of topics. 
+ * + * @return builder + * + */ + public Builder autoCreateTopicsEnable(Boolean autoCreateTopicsEnable) { + return autoCreateTopicsEnable(Output.of(autoCreateTopicsEnable)); + } + + /** + * @param clusterId The ID of the target Kafka cluster. + * + * @return builder + * + */ + public Builder clusterId(@Nullable Output clusterId) { + $.clusterId = clusterId; + return this; + } + + /** + * @param clusterId The ID of the target Kafka cluster. + * + * @return builder + * + */ + public Builder clusterId(String clusterId) { + return clusterId(Output.of(clusterId)); + } + + /** + * @param groupInitialRebalanceDelayMs The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + * @return builder + * + */ + public Builder groupInitialRebalanceDelayMs(@Nullable Output groupInitialRebalanceDelayMs) { + $.groupInitialRebalanceDelayMs = groupInitialRebalanceDelayMs; + return this; + } + + /** + * @param groupInitialRebalanceDelayMs The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + * + * @return builder + * + */ + public Builder groupInitialRebalanceDelayMs(Integer groupInitialRebalanceDelayMs) { + return groupInitialRebalanceDelayMs(Output.of(groupInitialRebalanceDelayMs)); + } + + /** + * @param groupMaxSessionTimeoutMs The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + * @return builder + * + */ + public Builder groupMaxSessionTimeoutMs(@Nullable Output groupMaxSessionTimeoutMs) { + $.groupMaxSessionTimeoutMs = groupMaxSessionTimeoutMs; + return this; + } + + /** + * @param groupMaxSessionTimeoutMs The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + * @return builder + * + */ + public Builder groupMaxSessionTimeoutMs(Integer groupMaxSessionTimeoutMs) { + return groupMaxSessionTimeoutMs(Output.of(groupMaxSessionTimeoutMs)); + } + + /** + * @param groupMinSessionTimeoutMs The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + * + * @return builder + * + */ + public Builder groupMinSessionTimeoutMs(@Nullable Output groupMinSessionTimeoutMs) { + $.groupMinSessionTimeoutMs = groupMinSessionTimeoutMs; + return this; + } + + /** + * @param groupMinSessionTimeoutMs The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. 
+ * + * @return builder + * + */ + public Builder groupMinSessionTimeoutMs(Integer groupMinSessionTimeoutMs) { + return groupMinSessionTimeoutMs(Output.of(groupMinSessionTimeoutMs)); + } + + /** + * @param logCleanerDeleteRetentionMs How long are delete records retained? + * + * @return builder + * + */ + public Builder logCleanerDeleteRetentionMs(@Nullable Output logCleanerDeleteRetentionMs) { + $.logCleanerDeleteRetentionMs = logCleanerDeleteRetentionMs; + return this; + } + + /** + * @param logCleanerDeleteRetentionMs How long are delete records retained? + * + * @return builder + * + */ + public Builder logCleanerDeleteRetentionMs(Integer logCleanerDeleteRetentionMs) { + return logCleanerDeleteRetentionMs(Output.of(logCleanerDeleteRetentionMs)); + } + + /** + * @param logCleanerMinCompactionLagMs The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + * + * @return builder + * + */ + public Builder logCleanerMinCompactionLagMs(@Nullable Output logCleanerMinCompactionLagMs) { + $.logCleanerMinCompactionLagMs = logCleanerMinCompactionLagMs; + return this; + } + + /** + * @param logCleanerMinCompactionLagMs The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + * + * @return builder + * + */ + public Builder logCleanerMinCompactionLagMs(String logCleanerMinCompactionLagMs) { + return logCleanerMinCompactionLagMs(Output.of(logCleanerMinCompactionLagMs)); + } + + /** + * @param logFlushIntervalMs The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + * + * @return builder + * + */ + public Builder logFlushIntervalMs(@Nullable Output logFlushIntervalMs) { + $.logFlushIntervalMs = logFlushIntervalMs; + return this; + } + + /** + * @param logFlushIntervalMs The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + * + * @return builder + * + */ + public Builder logFlushIntervalMs(String logFlushIntervalMs) { + return logFlushIntervalMs(Output.of(logFlushIntervalMs)); + } + + /** + * @param logIndexIntervalBytes The interval with which Kafka adds an entry to the offset index. + * + * @return builder + * + */ + public Builder logIndexIntervalBytes(@Nullable Output logIndexIntervalBytes) { + $.logIndexIntervalBytes = logIndexIntervalBytes; + return this; + } + + /** + * @param logIndexIntervalBytes The interval with which Kafka adds an entry to the offset index. + * + * @return builder + * + */ + public Builder logIndexIntervalBytes(Integer logIndexIntervalBytes) { + return logIndexIntervalBytes(Output.of(logIndexIntervalBytes)); + } + + /** + * @param logMessageDownconversionEnable This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + * + * @return builder + * + */ + public Builder logMessageDownconversionEnable(@Nullable Output logMessageDownconversionEnable) { + $.logMessageDownconversionEnable = logMessageDownconversionEnable; + return this; + } + + /** + * @param logMessageDownconversionEnable This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. 
+ * + * @return builder + * + */ + public Builder logMessageDownconversionEnable(Boolean logMessageDownconversionEnable) { + return logMessageDownconversionEnable(Output.of(logMessageDownconversionEnable)); + } + + /** + * @param logMessageTimestampDifferenceMaxMs The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + * + * @return builder + * + */ + public Builder logMessageTimestampDifferenceMaxMs(@Nullable Output logMessageTimestampDifferenceMaxMs) { + $.logMessageTimestampDifferenceMaxMs = logMessageTimestampDifferenceMaxMs; + return this; + } + + /** + * @param logMessageTimestampDifferenceMaxMs The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + * + * @return builder + * + */ + public Builder logMessageTimestampDifferenceMaxMs(String logMessageTimestampDifferenceMaxMs) { + return logMessageTimestampDifferenceMaxMs(Output.of(logMessageTimestampDifferenceMaxMs)); + } + + /** + * @param logPreallocate Controls whether to preallocate a file when creating a new segment. + * + * @return builder + * + */ + public Builder logPreallocate(@Nullable Output logPreallocate) { + $.logPreallocate = logPreallocate; + return this; + } + + /** + * @param logPreallocate Controls whether to preallocate a file when creating a new segment. + * + * @return builder + * + */ + public Builder logPreallocate(Boolean logPreallocate) { + return logPreallocate(Output.of(logPreallocate)); + } + + /** + * @param logRetentionBytes The maximum size of the log before deleting messages. + * + * @return builder + * + */ + public Builder logRetentionBytes(@Nullable Output logRetentionBytes) { + $.logRetentionBytes = logRetentionBytes; + return this; + } + + /** + * @param logRetentionBytes The maximum size of the log before deleting messages. + * + * @return builder + * + */ + public Builder logRetentionBytes(String logRetentionBytes) { + return logRetentionBytes(Output.of(logRetentionBytes)); + } + + /** + * @param logRetentionHours The number of hours to keep a log file before deleting it. + * + * @return builder + * + */ + public Builder logRetentionHours(@Nullable Output logRetentionHours) { + $.logRetentionHours = logRetentionHours; + return this; + } + + /** + * @param logRetentionHours The number of hours to keep a log file before deleting it. + * + * @return builder + * + */ + public Builder logRetentionHours(Integer logRetentionHours) { + return logRetentionHours(Output.of(logRetentionHours)); + } + + /** + * @param logRetentionMs The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + * + * @return builder + * + */ + public Builder logRetentionMs(@Nullable Output logRetentionMs) { + $.logRetentionMs = logRetentionMs; + return this; + } + + /** + * @param logRetentionMs The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + * + * @return builder + * + */ + public Builder logRetentionMs(String logRetentionMs) { + return logRetentionMs(Output.of(logRetentionMs)); + } + + /** + * @param logRollJitterMs The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. 
+ * + * @return builder + * + */ + public Builder logRollJitterMs(@Nullable Output logRollJitterMs) { + $.logRollJitterMs = logRollJitterMs; + return this; + } + + /** + * @param logRollJitterMs The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + * + * @return builder + * + */ + public Builder logRollJitterMs(String logRollJitterMs) { + return logRollJitterMs(Output.of(logRollJitterMs)); + } + + /** + * @param logSegmentDeleteDelayMs The amount of time to wait before deleting a file from the filesystem. + * + * @return builder + * + */ + public Builder logSegmentDeleteDelayMs(@Nullable Output logSegmentDeleteDelayMs) { + $.logSegmentDeleteDelayMs = logSegmentDeleteDelayMs; + return this; + } + + /** + * @param logSegmentDeleteDelayMs The amount of time to wait before deleting a file from the filesystem. + * + * @return builder + * + */ + public Builder logSegmentDeleteDelayMs(Integer logSegmentDeleteDelayMs) { + return logSegmentDeleteDelayMs(Output.of(logSegmentDeleteDelayMs)); + } + + /** + * @param messageMaxBytes The maximum size of message that the server can receive. + * + * @return builder + * + */ + public Builder messageMaxBytes(@Nullable Output messageMaxBytes) { + $.messageMaxBytes = messageMaxBytes; + return this; + } + + /** + * @param messageMaxBytes The maximum size of message that the server can receive. + * + * @return builder + * + */ + public Builder messageMaxBytes(Integer messageMaxBytes) { + return messageMaxBytes(Output.of(messageMaxBytes)); + } + + public DatabaseKafkaConfigState build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseMongodbConfigState.java b/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseMongodbConfigState.java new file mode 100644 index 00000000..305ffdb4 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseMongodbConfigState.java @@ -0,0 +1,269 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.digitalocean.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class DatabaseMongodbConfigState extends com.pulumi.resources.ResourceArgs { + + public static final DatabaseMongodbConfigState Empty = new DatabaseMongodbConfigState(); + + /** + * The ID of the target MongoDB cluster. + * + */ + @Import(name="clusterId") + private @Nullable Output clusterId; + + /** + * @return The ID of the target MongoDB cluster. + * + */ + public Optional> clusterId() { + return Optional.ofNullable(this.clusterId); + } + + /** + * Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + * + */ + @Import(name="defaultReadConcern") + private @Nullable Output defaultReadConcern; + + /** + * @return Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. 
Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + * + */ + public Optional> defaultReadConcern() { + return Optional.ofNullable(this.defaultReadConcern); + } + + /** + * Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + */ + @Import(name="defaultWriteConcern") + private @Nullable Output defaultWriteConcern; + + /** + * @return Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + */ + public Optional> defaultWriteConcern() { + return Optional.ofNullable(this.defaultWriteConcern); + } + + /** + * Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + */ + @Import(name="slowOpThresholdMs") + private @Nullable Output slowOpThresholdMs; + + /** + * @return Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + */ + public Optional> slowOpThresholdMs() { + return Optional.ofNullable(this.slowOpThresholdMs); + } + + /** + * Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + * + */ + @Import(name="transactionLifetimeLimitSeconds") + private @Nullable Output transactionLifetimeLimitSeconds; + + /** + * @return Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). 
+ * + */ + public Optional> transactionLifetimeLimitSeconds() { + return Optional.ofNullable(this.transactionLifetimeLimitSeconds); + } + + /** + * The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + */ + @Import(name="verbosity") + private @Nullable Output verbosity; + + /** + * @return The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + */ + public Optional> verbosity() { + return Optional.ofNullable(this.verbosity); + } + + private DatabaseMongodbConfigState() {} + + private DatabaseMongodbConfigState(DatabaseMongodbConfigState $) { + this.clusterId = $.clusterId; + this.defaultReadConcern = $.defaultReadConcern; + this.defaultWriteConcern = $.defaultWriteConcern; + this.slowOpThresholdMs = $.slowOpThresholdMs; + this.transactionLifetimeLimitSeconds = $.transactionLifetimeLimitSeconds; + this.verbosity = $.verbosity; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(DatabaseMongodbConfigState defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private DatabaseMongodbConfigState $; + + public Builder() { + $ = new DatabaseMongodbConfigState(); + } + + public Builder(DatabaseMongodbConfigState defaults) { + $ = new DatabaseMongodbConfigState(Objects.requireNonNull(defaults)); + } + + /** + * @param clusterId The ID of the target MongoDB cluster. + * + * @return builder + * + */ + public Builder clusterId(@Nullable Output clusterId) { + $.clusterId = clusterId; + return this; + } + + /** + * @param clusterId The ID of the target MongoDB cluster. + * + * @return builder + * + */ + public Builder clusterId(String clusterId) { + return clusterId(Output.of(clusterId)); + } + + /** + * @param defaultReadConcern Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + * + * @return builder + * + */ + public Builder defaultReadConcern(@Nullable Output defaultReadConcern) { + $.defaultReadConcern = defaultReadConcern; + return this; + } + + /** + * @param defaultReadConcern Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). 
+ * + * @return builder + * + */ + public Builder defaultReadConcern(String defaultReadConcern) { + return defaultReadConcern(Output.of(defaultReadConcern)); + } + + /** + * @param defaultWriteConcern Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + * @return builder + * + */ + public Builder defaultWriteConcern(@Nullable Output defaultWriteConcern) { + $.defaultWriteConcern = defaultWriteConcern; + return this; + } + + /** + * @param defaultWriteConcern Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + * + * @return builder + * + */ + public Builder defaultWriteConcern(String defaultWriteConcern) { + return defaultWriteConcern(Output.of(defaultWriteConcern)); + } + + /** + * @param slowOpThresholdMs Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + * @return builder + * + */ + public Builder slowOpThresholdMs(@Nullable Output slowOpThresholdMs) { + $.slowOpThresholdMs = slowOpThresholdMs; + return this; + } + + /** + * @param slowOpThresholdMs Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + * + * @return builder + * + */ + public Builder slowOpThresholdMs(Integer slowOpThresholdMs) { + return slowOpThresholdMs(Output.of(slowOpThresholdMs)); + } + + /** + * @param transactionLifetimeLimitSeconds Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). 
+ * + * @return builder + * + */ + public Builder transactionLifetimeLimitSeconds(@Nullable Output transactionLifetimeLimitSeconds) { + $.transactionLifetimeLimitSeconds = transactionLifetimeLimitSeconds; + return this; + } + + /** + * @param transactionLifetimeLimitSeconds Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + * + * @return builder + * + */ + public Builder transactionLifetimeLimitSeconds(Integer transactionLifetimeLimitSeconds) { + return transactionLifetimeLimitSeconds(Output.of(transactionLifetimeLimitSeconds)); + } + + /** + * @param verbosity The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + * @return builder + * + */ + public Builder verbosity(@Nullable Output verbosity) { + $.verbosity = verbosity; + return this; + } + + /** + * @param verbosity The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. <em>Changing this parameter will lead to a restart of the MongoDB service.</em> Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + * + * @return builder + * + */ + public Builder verbosity(Integer verbosity) { + return verbosity(Output.of(verbosity)); + } + + public DatabaseMongodbConfigState build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseUserSettingArgs.java b/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseUserSettingArgs.java index 0aa820e4..ced4a666 100644 --- a/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseUserSettingArgs.java +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseUserSettingArgs.java @@ -6,6 +6,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.digitalocean.inputs.DatabaseUserSettingAclArgs; +import com.pulumi.digitalocean.inputs.DatabaseUserSettingOpensearchAclArgs; import java.util.List; import java.util.Objects; import java.util.Optional; @@ -35,10 +36,18 @@ public Optional>> acls() { return Optional.ofNullable(this.acls); } + @Import(name="opensearchAcls") + private @Nullable Output> opensearchAcls; + + public Optional>> opensearchAcls() { + return Optional.ofNullable(this.opensearchAcls); + } + private DatabaseUserSettingArgs() {} private DatabaseUserSettingArgs(DatabaseUserSettingArgs $) { this.acls = $.acls; + this.opensearchAcls = $.opensearchAcls; } public static Builder builder() { @@ -96,6 +105,19 @@ public Builder acls(DatabaseUserSettingAclArgs... 
acls) { return acls(List.of(acls)); } + public Builder opensearchAcls(@Nullable Output> opensearchAcls) { + $.opensearchAcls = opensearchAcls; + return this; + } + + public Builder opensearchAcls(List opensearchAcls) { + return opensearchAcls(Output.of(opensearchAcls)); + } + + public Builder opensearchAcls(DatabaseUserSettingOpensearchAclArgs... opensearchAcls) { + return opensearchAcls(List.of(opensearchAcls)); + } + public DatabaseUserSettingArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseUserSettingOpensearchAclArgs.java b/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseUserSettingOpensearchAclArgs.java new file mode 100644 index 00000000..d4bf4212 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/inputs/DatabaseUserSettingOpensearchAclArgs.java @@ -0,0 +1,105 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.digitalocean.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class DatabaseUserSettingOpensearchAclArgs extends com.pulumi.resources.ResourceArgs { + + public static final DatabaseUserSettingOpensearchAclArgs Empty = new DatabaseUserSettingOpensearchAclArgs(); + + @Import(name="index", required=true) + private Output index; + + public Output index() { + return this.index; + } + + /** + * The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + * + */ + @Import(name="permission", required=true) + private Output permission; + + /** + * @return The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + * + */ + public Output permission() { + return this.permission; + } + + private DatabaseUserSettingOpensearchAclArgs() {} + + private DatabaseUserSettingOpensearchAclArgs(DatabaseUserSettingOpensearchAclArgs $) { + this.index = $.index; + this.permission = $.permission; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(DatabaseUserSettingOpensearchAclArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private DatabaseUserSettingOpensearchAclArgs $; + + public Builder() { + $ = new DatabaseUserSettingOpensearchAclArgs(); + } + + public Builder(DatabaseUserSettingOpensearchAclArgs defaults) { + $ = new DatabaseUserSettingOpensearchAclArgs(Objects.requireNonNull(defaults)); + } + + public Builder index(Output index) { + $.index = index; + return this; + } + + public Builder index(String index) { + return index(Output.of(index)); + } + + /** + * @param permission The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". 
"admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + * + * @return builder + * + */ + public Builder permission(Output permission) { + $.permission = permission; + return this; + } + + /** + * @param permission The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + * + * @return builder + * + */ + public Builder permission(String permission) { + return permission(Output.of(permission)); + } + + public DatabaseUserSettingOpensearchAclArgs build() { + if ($.index == null) { + throw new MissingRequiredPropertyException("DatabaseUserSettingOpensearchAclArgs", "index"); + } + if ($.permission == null) { + throw new MissingRequiredPropertyException("DatabaseUserSettingOpensearchAclArgs", "permission"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/outputs/DatabaseUserSetting.java b/sdk/java/src/main/java/com/pulumi/digitalocean/outputs/DatabaseUserSetting.java index 2b66327e..f99a49d9 100644 --- a/sdk/java/src/main/java/com/pulumi/digitalocean/outputs/DatabaseUserSetting.java +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/outputs/DatabaseUserSetting.java @@ -5,6 +5,7 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.digitalocean.outputs.DatabaseUserSettingAcl; +import com.pulumi.digitalocean.outputs.DatabaseUserSettingOpensearchAcl; import java.util.List; import java.util.Objects; import javax.annotation.Nullable; @@ -18,6 +19,7 @@ public final class DatabaseUserSetting { * */ private @Nullable List acls; + private @Nullable List opensearchAcls; private DatabaseUserSetting() {} /** @@ -29,6 +31,9 @@ private DatabaseUserSetting() {} public List acls() { return this.acls == null ? List.of() : this.acls; } + public List opensearchAcls() { + return this.opensearchAcls == null ? List.of() : this.opensearchAcls; + } public static Builder builder() { return new Builder(); @@ -40,10 +45,12 @@ public static Builder builder(DatabaseUserSetting defaults) { @CustomType.Builder public static final class Builder { private @Nullable List acls; + private @Nullable List opensearchAcls; public Builder() {} public Builder(DatabaseUserSetting defaults) { Objects.requireNonNull(defaults); this.acls = defaults.acls; + this.opensearchAcls = defaults.opensearchAcls; } @CustomType.Setter @@ -55,9 +62,19 @@ public Builder acls(@Nullable List acls) { public Builder acls(DatabaseUserSettingAcl... acls) { return acls(List.of(acls)); } + @CustomType.Setter + public Builder opensearchAcls(@Nullable List opensearchAcls) { + + this.opensearchAcls = opensearchAcls; + return this; + } + public Builder opensearchAcls(DatabaseUserSettingOpensearchAcl... 
opensearchAcls) { + return opensearchAcls(List.of(opensearchAcls)); + } public DatabaseUserSetting build() { final var _resultValue = new DatabaseUserSetting(); _resultValue.acls = acls; + _resultValue.opensearchAcls = opensearchAcls; return _resultValue; } } diff --git a/sdk/java/src/main/java/com/pulumi/digitalocean/outputs/DatabaseUserSettingOpensearchAcl.java b/sdk/java/src/main/java/com/pulumi/digitalocean/outputs/DatabaseUserSettingOpensearchAcl.java new file mode 100644 index 00000000..99a43d7e --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/digitalocean/outputs/DatabaseUserSettingOpensearchAcl.java @@ -0,0 +1,73 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.digitalocean.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class DatabaseUserSettingOpensearchAcl { + private String index; + /** + * @return The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + * + */ + private String permission; + + private DatabaseUserSettingOpensearchAcl() {} + public String index() { + return this.index; + } + /** + * @return The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + * + */ + public String permission() { + return this.permission; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(DatabaseUserSettingOpensearchAcl defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String index; + private String permission; + public Builder() {} + public Builder(DatabaseUserSettingOpensearchAcl defaults) { + Objects.requireNonNull(defaults); + this.index = defaults.index; + this.permission = defaults.permission; + } + + @CustomType.Setter + public Builder index(String index) { + if (index == null) { + throw new MissingRequiredPropertyException("DatabaseUserSettingOpensearchAcl", "index"); + } + this.index = index; + return this; + } + @CustomType.Setter + public Builder permission(String permission) { + if (permission == null) { + throw new MissingRequiredPropertyException("DatabaseUserSettingOpensearchAcl", "permission"); + } + this.permission = permission; + return this; + } + public DatabaseUserSettingOpensearchAcl build() { + final var _resultValue = new DatabaseUserSettingOpensearchAcl(); + _resultValue.index = index; + _resultValue.permission = permission; + return _resultValue; + } + } +} diff --git a/sdk/nodejs/databaseKafkaConfig.ts b/sdk/nodejs/databaseKafkaConfig.ts new file mode 100644 index 00000000..aa4c0f2b --- /dev/null +++ b/sdk/nodejs/databaseKafkaConfig.ts @@ -0,0 +1,372 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as utilities from "./utilities"; + +/** + * Provides a virtual resource that can be used to change advanced configuration + * options for a DigitalOcean managed Kafka database cluster. + * + * > **Note** Kafka configurations are only removed from state when destroyed. The remote configuration is not unset. + * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as digitalocean from "@pulumi/digitalocean"; + * + * const exampleDatabaseCluster = new digitalocean.DatabaseCluster("example", { + * name: "example-kafka-cluster", + * engine: "kafka", + * version: "3.7", + * size: digitalocean.DatabaseSlug.DB_1VPCU1GB, + * region: digitalocean.Region.NYC3, + * nodeCount: 3, + * }); + * const example = new digitalocean.DatabaseKafkaConfig("example", { + * clusterId: exampleDatabaseCluster.id, + * groupInitialRebalanceDelayMs: 3000, + * groupMinSessionTimeoutMs: 6000, + * groupMaxSessionTimeoutMs: 1800000, + * messageMaxBytes: 1048588, + * logCleanerDeleteRetentionMs: 86400000, + * logCleanerMinCompactionLagMs: "0", + * logFlushIntervalMs: "9223372036854775807", + * logIndexIntervalBytes: 4096, + * logMessageDownconversionEnable: true, + * logMessageTimestampDifferenceMaxMs: "9223372036854775807", + * logPreallocate: false, + * logRetentionBytes: "-1", + * logRetentionHours: 168, + * logRetentionMs: "604800000", + * logRollJitterMs: "0", + * logSegmentDeleteDelayMs: 60000, + * autoCreateTopicsEnable: true, + * }); + * ``` + * + * ## Import + * + * A Kafka database cluster's configuration can be imported using the `id` the parent cluster, e.g. + * + * ```sh + * $ pulumi import digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + * ``` + */ +export class DatabaseKafkaConfig extends pulumi.CustomResource { + /** + * Get an existing DatabaseKafkaConfig resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state Any extra arguments used during the lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, state?: DatabaseKafkaConfigState, opts?: pulumi.CustomResourceOptions): DatabaseKafkaConfig { + return new DatabaseKafkaConfig(name, state, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig'; + + /** + * Returns true if the given object is an instance of DatabaseKafkaConfig. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is DatabaseKafkaConfig { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === DatabaseKafkaConfig.__pulumiType; + } + + /** + * Enable auto creation of topics. + */ + public readonly autoCreateTopicsEnable!: pulumi.Output; + /** + * The ID of the target Kafka cluster. + */ + public readonly clusterId!: pulumi.Output; + /** + * The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. 
A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + */ + public readonly groupInitialRebalanceDelayMs!: pulumi.Output; + /** + * The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + */ + public readonly groupMaxSessionTimeoutMs!: pulumi.Output; + /** + * The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + */ + public readonly groupMinSessionTimeoutMs!: pulumi.Output; + /** + * How long are delete records retained? + */ + public readonly logCleanerDeleteRetentionMs!: pulumi.Output; + /** + * The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + */ + public readonly logCleanerMinCompactionLagMs!: pulumi.Output; + /** + * The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + */ + public readonly logFlushIntervalMs!: pulumi.Output; + /** + * The interval with which Kafka adds an entry to the offset index. + */ + public readonly logIndexIntervalBytes!: pulumi.Output; + /** + * This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + */ + public readonly logMessageDownconversionEnable!: pulumi.Output; + /** + * The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + */ + public readonly logMessageTimestampDifferenceMaxMs!: pulumi.Output; + /** + * Controls whether to preallocate a file when creating a new segment. + */ + public readonly logPreallocate!: pulumi.Output; + /** + * The maximum size of the log before deleting messages. + */ + public readonly logRetentionBytes!: pulumi.Output; + /** + * The number of hours to keep a log file before deleting it. + */ + public readonly logRetentionHours!: pulumi.Output; + /** + * The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + */ + public readonly logRetentionMs!: pulumi.Output; + /** + * The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + */ + public readonly logRollJitterMs!: pulumi.Output; + /** + * The amount of time to wait before deleting a file from the filesystem. + */ + public readonly logSegmentDeleteDelayMs!: pulumi.Output; + /** + * The maximum size of message that the server can receive. + */ + public readonly messageMaxBytes!: pulumi.Output; + + /** + * Create a DatabaseKafkaConfig resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. 
+ */ + constructor(name: string, args: DatabaseKafkaConfigArgs, opts?: pulumi.CustomResourceOptions) + constructor(name: string, argsOrState?: DatabaseKafkaConfigArgs | DatabaseKafkaConfigState, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (opts.id) { + const state = argsOrState as DatabaseKafkaConfigState | undefined; + resourceInputs["autoCreateTopicsEnable"] = state ? state.autoCreateTopicsEnable : undefined; + resourceInputs["clusterId"] = state ? state.clusterId : undefined; + resourceInputs["groupInitialRebalanceDelayMs"] = state ? state.groupInitialRebalanceDelayMs : undefined; + resourceInputs["groupMaxSessionTimeoutMs"] = state ? state.groupMaxSessionTimeoutMs : undefined; + resourceInputs["groupMinSessionTimeoutMs"] = state ? state.groupMinSessionTimeoutMs : undefined; + resourceInputs["logCleanerDeleteRetentionMs"] = state ? state.logCleanerDeleteRetentionMs : undefined; + resourceInputs["logCleanerMinCompactionLagMs"] = state ? state.logCleanerMinCompactionLagMs : undefined; + resourceInputs["logFlushIntervalMs"] = state ? state.logFlushIntervalMs : undefined; + resourceInputs["logIndexIntervalBytes"] = state ? state.logIndexIntervalBytes : undefined; + resourceInputs["logMessageDownconversionEnable"] = state ? state.logMessageDownconversionEnable : undefined; + resourceInputs["logMessageTimestampDifferenceMaxMs"] = state ? state.logMessageTimestampDifferenceMaxMs : undefined; + resourceInputs["logPreallocate"] = state ? state.logPreallocate : undefined; + resourceInputs["logRetentionBytes"] = state ? state.logRetentionBytes : undefined; + resourceInputs["logRetentionHours"] = state ? state.logRetentionHours : undefined; + resourceInputs["logRetentionMs"] = state ? state.logRetentionMs : undefined; + resourceInputs["logRollJitterMs"] = state ? state.logRollJitterMs : undefined; + resourceInputs["logSegmentDeleteDelayMs"] = state ? state.logSegmentDeleteDelayMs : undefined; + resourceInputs["messageMaxBytes"] = state ? state.messageMaxBytes : undefined; + } else { + const args = argsOrState as DatabaseKafkaConfigArgs | undefined; + if ((!args || args.clusterId === undefined) && !opts.urn) { + throw new Error("Missing required property 'clusterId'"); + } + resourceInputs["autoCreateTopicsEnable"] = args ? args.autoCreateTopicsEnable : undefined; + resourceInputs["clusterId"] = args ? args.clusterId : undefined; + resourceInputs["groupInitialRebalanceDelayMs"] = args ? args.groupInitialRebalanceDelayMs : undefined; + resourceInputs["groupMaxSessionTimeoutMs"] = args ? args.groupMaxSessionTimeoutMs : undefined; + resourceInputs["groupMinSessionTimeoutMs"] = args ? args.groupMinSessionTimeoutMs : undefined; + resourceInputs["logCleanerDeleteRetentionMs"] = args ? args.logCleanerDeleteRetentionMs : undefined; + resourceInputs["logCleanerMinCompactionLagMs"] = args ? args.logCleanerMinCompactionLagMs : undefined; + resourceInputs["logFlushIntervalMs"] = args ? args.logFlushIntervalMs : undefined; + resourceInputs["logIndexIntervalBytes"] = args ? args.logIndexIntervalBytes : undefined; + resourceInputs["logMessageDownconversionEnable"] = args ? args.logMessageDownconversionEnable : undefined; + resourceInputs["logMessageTimestampDifferenceMaxMs"] = args ? args.logMessageTimestampDifferenceMaxMs : undefined; + resourceInputs["logPreallocate"] = args ? args.logPreallocate : undefined; + resourceInputs["logRetentionBytes"] = args ? args.logRetentionBytes : undefined; + resourceInputs["logRetentionHours"] = args ? 
args.logRetentionHours : undefined; + resourceInputs["logRetentionMs"] = args ? args.logRetentionMs : undefined; + resourceInputs["logRollJitterMs"] = args ? args.logRollJitterMs : undefined; + resourceInputs["logSegmentDeleteDelayMs"] = args ? args.logSegmentDeleteDelayMs : undefined; + resourceInputs["messageMaxBytes"] = args ? args.messageMaxBytes : undefined; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + super(DatabaseKafkaConfig.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * Input properties used for looking up and filtering DatabaseKafkaConfig resources. + */ +export interface DatabaseKafkaConfigState { + /** + * Enable auto creation of topics. + */ + autoCreateTopicsEnable?: pulumi.Input; + /** + * The ID of the target Kafka cluster. + */ + clusterId?: pulumi.Input; + /** + * The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + */ + groupInitialRebalanceDelayMs?: pulumi.Input; + /** + * The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + */ + groupMaxSessionTimeoutMs?: pulumi.Input; + /** + * The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + */ + groupMinSessionTimeoutMs?: pulumi.Input; + /** + * How long are delete records retained? + */ + logCleanerDeleteRetentionMs?: pulumi.Input; + /** + * The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + */ + logCleanerMinCompactionLagMs?: pulumi.Input; + /** + * The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + */ + logFlushIntervalMs?: pulumi.Input; + /** + * The interval with which Kafka adds an entry to the offset index. + */ + logIndexIntervalBytes?: pulumi.Input; + /** + * This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + */ + logMessageDownconversionEnable?: pulumi.Input; + /** + * The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + */ + logMessageTimestampDifferenceMaxMs?: pulumi.Input; + /** + * Controls whether to preallocate a file when creating a new segment. + */ + logPreallocate?: pulumi.Input; + /** + * The maximum size of the log before deleting messages. + */ + logRetentionBytes?: pulumi.Input; + /** + * The number of hours to keep a log file before deleting it. + */ + logRetentionHours?: pulumi.Input; + /** + * The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + */ + logRetentionMs?: pulumi.Input; + /** + * The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. 
+ */ + logRollJitterMs?: pulumi.Input; + /** + * The amount of time to wait before deleting a file from the filesystem. + */ + logSegmentDeleteDelayMs?: pulumi.Input; + /** + * The maximum size of message that the server can receive. + */ + messageMaxBytes?: pulumi.Input; +} + +/** + * The set of arguments for constructing a DatabaseKafkaConfig resource. + */ +export interface DatabaseKafkaConfigArgs { + /** + * Enable auto creation of topics. + */ + autoCreateTopicsEnable?: pulumi.Input; + /** + * The ID of the target Kafka cluster. + */ + clusterId: pulumi.Input; + /** + * The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + */ + groupInitialRebalanceDelayMs?: pulumi.Input; + /** + * The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + */ + groupMaxSessionTimeoutMs?: pulumi.Input; + /** + * The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + */ + groupMinSessionTimeoutMs?: pulumi.Input; + /** + * How long are delete records retained? + */ + logCleanerDeleteRetentionMs?: pulumi.Input; + /** + * The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + */ + logCleanerMinCompactionLagMs?: pulumi.Input; + /** + * The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + */ + logFlushIntervalMs?: pulumi.Input; + /** + * The interval with which Kafka adds an entry to the offset index. + */ + logIndexIntervalBytes?: pulumi.Input; + /** + * This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + */ + logMessageDownconversionEnable?: pulumi.Input; + /** + * The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + */ + logMessageTimestampDifferenceMaxMs?: pulumi.Input; + /** + * Controls whether to preallocate a file when creating a new segment. + */ + logPreallocate?: pulumi.Input; + /** + * The maximum size of the log before deleting messages. + */ + logRetentionBytes?: pulumi.Input; + /** + * The number of hours to keep a log file before deleting it. + */ + logRetentionHours?: pulumi.Input; + /** + * The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + */ + logRetentionMs?: pulumi.Input; + /** + * The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + */ + logRollJitterMs?: pulumi.Input; + /** + * The amount of time to wait before deleting a file from the filesystem. + */ + logSegmentDeleteDelayMs?: pulumi.Input; + /** + * The maximum size of message that the server can receive. 
+ */ + messageMaxBytes?: pulumi.Input; +} diff --git a/sdk/nodejs/databaseMongodbConfig.ts b/sdk/nodejs/databaseMongodbConfig.ts new file mode 100644 index 00000000..386c1dce --- /dev/null +++ b/sdk/nodejs/databaseMongodbConfig.ts @@ -0,0 +1,192 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as utilities from "./utilities"; + +/** + * Provides a virtual resource that can be used to change advanced configuration + * options for a DigitalOcean managed MongoDB database cluster. + * + * > **Note** MongoDB configurations are only removed from state when destroyed. The remote configuration is not unset. + * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as digitalocean from "@pulumi/digitalocean"; + * + * const exampleDatabaseCluster = new digitalocean.DatabaseCluster("example", { + * name: "example-mongodb-cluster", + * engine: "mongodb", + * version: "7", + * size: digitalocean.DatabaseSlug.DB_1VPCU1GB, + * region: digitalocean.Region.NYC3, + * nodeCount: 1, + * }); + * const example = new digitalocean.DatabaseMongodbConfig("example", { + * clusterId: exampleDatabaseCluster.id, + * defaultReadConcern: "majority", + * defaultWriteConcern: "majority", + * transactionLifetimeLimitSeconds: 100, + * slowOpThresholdMs: 100, + * verbosity: 3, + * }); + * ``` + * + * ## Import + * + * A MongoDB database cluster's configuration can be imported using the `id` the parent cluster, e.g. + * + * ```sh + * $ pulumi import digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + * ``` + */ +export class DatabaseMongodbConfig extends pulumi.CustomResource { + /** + * Get an existing DatabaseMongodbConfig resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state Any extra arguments used during the lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, state?: DatabaseMongodbConfigState, opts?: pulumi.CustomResourceOptions): DatabaseMongodbConfig { + return new DatabaseMongodbConfig(name, state, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig'; + + /** + * Returns true if the given object is an instance of DatabaseMongodbConfig. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is DatabaseMongodbConfig { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === DatabaseMongodbConfig.__pulumiType; + } + + /** + * The ID of the target MongoDB cluster. + */ + public readonly clusterId!: pulumi.Output; + /** + * Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). 
+ */ + public readonly defaultReadConcern!: pulumi.Output; + /** + * Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + */ + public readonly defaultWriteConcern!: pulumi.Output; + /** + * Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + */ + public readonly slowOpThresholdMs!: pulumi.Output; + /** + * Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + */ + public readonly transactionLifetimeLimitSeconds!: pulumi.Output; + /** + * The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + */ + public readonly verbosity!: pulumi.Output; + + /** + * Create a DatabaseMongodbConfig resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: DatabaseMongodbConfigArgs, opts?: pulumi.CustomResourceOptions) + constructor(name: string, argsOrState?: DatabaseMongodbConfigArgs | DatabaseMongodbConfigState, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (opts.id) { + const state = argsOrState as DatabaseMongodbConfigState | undefined; + resourceInputs["clusterId"] = state ? state.clusterId : undefined; + resourceInputs["defaultReadConcern"] = state ? state.defaultReadConcern : undefined; + resourceInputs["defaultWriteConcern"] = state ? state.defaultWriteConcern : undefined; + resourceInputs["slowOpThresholdMs"] = state ? state.slowOpThresholdMs : undefined; + resourceInputs["transactionLifetimeLimitSeconds"] = state ? state.transactionLifetimeLimitSeconds : undefined; + resourceInputs["verbosity"] = state ? state.verbosity : undefined; + } else { + const args = argsOrState as DatabaseMongodbConfigArgs | undefined; + if ((!args || args.clusterId === undefined) && !opts.urn) { + throw new Error("Missing required property 'clusterId'"); + } + resourceInputs["clusterId"] = args ? 
args.clusterId : undefined; + resourceInputs["defaultReadConcern"] = args ? args.defaultReadConcern : undefined; + resourceInputs["defaultWriteConcern"] = args ? args.defaultWriteConcern : undefined; + resourceInputs["slowOpThresholdMs"] = args ? args.slowOpThresholdMs : undefined; + resourceInputs["transactionLifetimeLimitSeconds"] = args ? args.transactionLifetimeLimitSeconds : undefined; + resourceInputs["verbosity"] = args ? args.verbosity : undefined; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + super(DatabaseMongodbConfig.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * Input properties used for looking up and filtering DatabaseMongodbConfig resources. + */ +export interface DatabaseMongodbConfigState { + /** + * The ID of the target MongoDB cluster. + */ + clusterId?: pulumi.Input; + /** + * Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + */ + defaultReadConcern?: pulumi.Input; + /** + * Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + */ + defaultWriteConcern?: pulumi.Input; + /** + * Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + */ + slowOpThresholdMs?: pulumi.Input; + /** + * Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + */ + transactionLifetimeLimitSeconds?: pulumi.Input; + /** + * The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + */ + verbosity?: pulumi.Input; +} + +/** + * The set of arguments for constructing a DatabaseMongodbConfig resource. + */ +export interface DatabaseMongodbConfigArgs { + /** + * The ID of the target MongoDB cluster. + */ + clusterId: pulumi.Input; + /** + * Specifies the default consistency behavior of reads from the database. 
Data that is returned from the query may or may not have been acknowledged by all nodes in the replica set depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + */ + defaultReadConcern?: pulumi.Input; + /** + * Describes the level of acknowledgment requested from MongoDB for write operations. This field can be set to either `majority` or a number `0...n` describing the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + */ + defaultWriteConcern?: pulumi.Input; + /** + * Operations that run for longer than this threshold are considered slow and are recorded in the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + */ + slowOpThresholdMs?: pulumi.Input; + /** + * Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + */ + transactionLifetimeLimitSeconds?: pulumi.Input; + /** + * The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity).
+ */ + verbosity?: pulumi.Input; +} diff --git a/sdk/nodejs/index.ts b/sdk/nodejs/index.ts index f71b7430..70e89600 100644 --- a/sdk/nodejs/index.ts +++ b/sdk/nodejs/index.ts @@ -55,11 +55,21 @@ export type DatabaseFirewall = import("./databaseFirewall").DatabaseFirewall; export const DatabaseFirewall: typeof import("./databaseFirewall").DatabaseFirewall = null as any; utilities.lazyLoad(exports, ["DatabaseFirewall"], () => require("./databaseFirewall")); +export { DatabaseKafkaConfigArgs, DatabaseKafkaConfigState } from "./databaseKafkaConfig"; +export type DatabaseKafkaConfig = import("./databaseKafkaConfig").DatabaseKafkaConfig; +export const DatabaseKafkaConfig: typeof import("./databaseKafkaConfig").DatabaseKafkaConfig = null as any; +utilities.lazyLoad(exports, ["DatabaseKafkaConfig"], () => require("./databaseKafkaConfig")); + export { DatabaseKafkaTopicArgs, DatabaseKafkaTopicState } from "./databaseKafkaTopic"; export type DatabaseKafkaTopic = import("./databaseKafkaTopic").DatabaseKafkaTopic; export const DatabaseKafkaTopic: typeof import("./databaseKafkaTopic").DatabaseKafkaTopic = null as any; utilities.lazyLoad(exports, ["DatabaseKafkaTopic"], () => require("./databaseKafkaTopic")); +export { DatabaseMongodbConfigArgs, DatabaseMongodbConfigState } from "./databaseMongodbConfig"; +export type DatabaseMongodbConfig = import("./databaseMongodbConfig").DatabaseMongodbConfig; +export const DatabaseMongodbConfig: typeof import("./databaseMongodbConfig").DatabaseMongodbConfig = null as any; +utilities.lazyLoad(exports, ["DatabaseMongodbConfig"], () => require("./databaseMongodbConfig")); + export { DatabaseMysqlConfigArgs, DatabaseMysqlConfigState } from "./databaseMysqlConfig"; export type DatabaseMysqlConfig = import("./databaseMysqlConfig").DatabaseMysqlConfig; export const DatabaseMysqlConfig: typeof import("./databaseMysqlConfig").DatabaseMysqlConfig = null as any; @@ -472,8 +482,12 @@ const _module = { return new DatabaseDb(name, undefined, { urn }) case "digitalocean:index/databaseFirewall:DatabaseFirewall": return new DatabaseFirewall(name, undefined, { urn }) + case "digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig": + return new DatabaseKafkaConfig(name, undefined, { urn }) case "digitalocean:index/databaseKafkaTopic:DatabaseKafkaTopic": return new DatabaseKafkaTopic(name, undefined, { urn }) + case "digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig": + return new DatabaseMongodbConfig(name, undefined, { urn }) case "digitalocean:index/databaseMysqlConfig:DatabaseMysqlConfig": return new DatabaseMysqlConfig(name, undefined, { urn }) case "digitalocean:index/databasePostgresqlConfig:DatabasePostgresqlConfig": @@ -555,7 +569,9 @@ pulumi.runtime.registerResourceModule("digitalocean", "index/databaseCluster", _ pulumi.runtime.registerResourceModule("digitalocean", "index/databaseConnectionPool", _module) pulumi.runtime.registerResourceModule("digitalocean", "index/databaseDb", _module) pulumi.runtime.registerResourceModule("digitalocean", "index/databaseFirewall", _module) +pulumi.runtime.registerResourceModule("digitalocean", "index/databaseKafkaConfig", _module) pulumi.runtime.registerResourceModule("digitalocean", "index/databaseKafkaTopic", _module) +pulumi.runtime.registerResourceModule("digitalocean", "index/databaseMongodbConfig", _module) pulumi.runtime.registerResourceModule("digitalocean", "index/databaseMysqlConfig", _module) pulumi.runtime.registerResourceModule("digitalocean", "index/databasePostgresqlConfig", _module) 
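As a quick illustration of the new MongoDB configuration resource registered above, the following is a minimal sketch using the generated Python SDK. It assumes the Python argument names follow the usual snake_case mapping of the TypeScript properties shown earlier; the cluster definition and every option value below are illustrative placeholders.

```python
import pulumi_digitalocean as digitalocean

# Hypothetical MongoDB cluster; engine, version, size, and region are example values.
example_mongo = digitalocean.DatabaseCluster(
    "example-mongo",
    name="example-mongodb-cluster",
    engine="mongodb",
    version="7",
    size="db-s-1vcpu-1gb",
    region="nyc3",
    node_count=1,
)

# Advanced configuration for the cluster above. Only cluster_id is required;
# the remaining options and their values are shown for illustration only.
example_mongo_config = digitalocean.DatabaseMongodbConfig(
    "example-mongo-config",
    cluster_id=example_mongo.id,
    default_read_concern="majority",
    default_write_concern="majority",
    slow_op_threshold_ms=250,
    transaction_lifetime_limit_seconds=100,
    verbosity=1,
)
```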
pulumi.runtime.registerResourceModule("digitalocean", "index/databaseRedisConfig", _module) diff --git a/sdk/nodejs/tsconfig.json b/sdk/nodejs/tsconfig.json index c6482869..09cccf7b 100644 --- a/sdk/nodejs/tsconfig.json +++ b/sdk/nodejs/tsconfig.json @@ -25,7 +25,9 @@ "databaseConnectionPool.ts", "databaseDb.ts", "databaseFirewall.ts", + "databaseKafkaConfig.ts", "databaseKafkaTopic.ts", + "databaseMongodbConfig.ts", "databaseMysqlConfig.ts", "databasePostgresqlConfig.ts", "databaseRedisConfig.ts", diff --git a/sdk/nodejs/types/input.ts b/sdk/nodejs/types/input.ts index 2ea50bf5..92236485 100644 --- a/sdk/nodejs/types/input.ts +++ b/sdk/nodejs/types/input.ts @@ -1812,6 +1812,7 @@ export interface DatabaseUserSetting { * An individual ACL includes the following: */ acls?: pulumi.Input[]>; + opensearchAcls?: pulumi.Input[]>; } export interface DatabaseUserSettingAcl { @@ -1829,6 +1830,14 @@ export interface DatabaseUserSettingAcl { topic: pulumi.Input; } +export interface DatabaseUserSettingOpensearchAcl { + index: pulumi.Input; + /** + * The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + */ + permission: pulumi.Input; +} + export interface FirewallInboundRule { /** * The ports on which traffic will be allowed diff --git a/sdk/nodejs/types/output.ts b/sdk/nodejs/types/output.ts index a0b194ab..9f72bde9 100644 --- a/sdk/nodejs/types/output.ts +++ b/sdk/nodejs/types/output.ts @@ -1812,6 +1812,7 @@ export interface DatabaseUserSetting { * An individual ACL includes the following: */ acls?: outputs.DatabaseUserSettingAcl[]; + opensearchAcls?: outputs.DatabaseUserSettingOpensearchAcl[]; } export interface DatabaseUserSettingAcl { @@ -1829,6 +1830,14 @@ export interface DatabaseUserSettingAcl { topic: string; } +export interface DatabaseUserSettingOpensearchAcl { + index: string; + /** + * The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. 
+ */ + permission: string; +} + export interface FirewallInboundRule { /** * The ports on which traffic will be allowed diff --git a/sdk/python/pulumi_digitalocean/__init__.py b/sdk/python/pulumi_digitalocean/__init__.py index a4b78099..52bb54a1 100644 --- a/sdk/python/pulumi_digitalocean/__init__.py +++ b/sdk/python/pulumi_digitalocean/__init__.py @@ -16,7 +16,9 @@ from .database_connection_pool import * from .database_db import * from .database_firewall import * +from .database_kafka_config import * from .database_kafka_topic import * +from .database_mongodb_config import * from .database_mysql_config import * from .database_postgresql_config import * from .database_redis_config import * @@ -185,6 +187,14 @@ "digitalocean:index/databaseFirewall:DatabaseFirewall": "DatabaseFirewall" } }, + { + "pkg": "digitalocean", + "mod": "index/databaseKafkaConfig", + "fqn": "pulumi_digitalocean", + "classes": { + "digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig": "DatabaseKafkaConfig" + } + }, { "pkg": "digitalocean", "mod": "index/databaseKafkaTopic", @@ -193,6 +203,14 @@ "digitalocean:index/databaseKafkaTopic:DatabaseKafkaTopic": "DatabaseKafkaTopic" } }, + { + "pkg": "digitalocean", + "mod": "index/databaseMongodbConfig", + "fqn": "pulumi_digitalocean", + "classes": { + "digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig": "DatabaseMongodbConfig" + } + }, { "pkg": "digitalocean", "mod": "index/databaseMysqlConfig", diff --git a/sdk/python/pulumi_digitalocean/_inputs.py b/sdk/python/pulumi_digitalocean/_inputs.py index 76a6f64f..2f8f18d9 100644 --- a/sdk/python/pulumi_digitalocean/_inputs.py +++ b/sdk/python/pulumi_digitalocean/_inputs.py @@ -106,6 +106,7 @@ 'DatabasePostgresqlConfigTimescaledbArgs', 'DatabaseUserSettingArgs', 'DatabaseUserSettingAclArgs', + 'DatabaseUserSettingOpensearchAclArgs', 'FirewallInboundRuleArgs', 'FirewallOutboundRuleArgs', 'FirewallPendingChangeArgs', @@ -6895,7 +6896,8 @@ def max_background_workers(self, value: Optional[pulumi.Input[int]]): @pulumi.input_type class DatabaseUserSettingArgs: def __init__(__self__, *, - acls: Optional[pulumi.Input[Sequence[pulumi.Input['DatabaseUserSettingAclArgs']]]] = None): + acls: Optional[pulumi.Input[Sequence[pulumi.Input['DatabaseUserSettingAclArgs']]]] = None, + opensearch_acls: Optional[pulumi.Input[Sequence[pulumi.Input['DatabaseUserSettingOpensearchAclArgs']]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input['DatabaseUserSettingAclArgs']]] acls: A set of ACLs (Access Control Lists) specifying permission on topics with a Kafka cluster. 
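The new `opensearch_acls` user setting added above has no usage example elsewhere in this change, so here is a hedged sketch of how it might be wired up from the Python SDK. The cluster values and index pattern are illustrative, the assumption that `settings` accepts a single `DatabaseUserSettingArgs` object is unverified, and the accepted `permission` values should be confirmed against the provider documentation.

```python
import pulumi_digitalocean as digitalocean

# Hypothetical OpenSearch cluster; engine, version, size, and region are example values.
example_search = digitalocean.DatabaseCluster(
    "example-opensearch",
    name="example-opensearch-cluster",
    engine="opensearch",
    version="2",
    size="db-s-1vcpu-2gb",
    region="nyc3",
    node_count=1,
)

# A database user whose OpenSearch access is scoped to indices matching "logs-*".
# The settings shape (a single DatabaseUserSettingArgs) and the "admin" permission
# value are assumptions based on the types introduced in this diff.
example_user = digitalocean.DatabaseUser(
    "example-user",
    cluster_id=example_search.id,
    name="example-user",
    settings=digitalocean.DatabaseUserSettingArgs(
        opensearch_acls=[
            digitalocean.DatabaseUserSettingOpensearchAclArgs(
                index="logs-*",
                permission="admin",
            ),
        ],
    ),
)
```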
The properties of an individual ACL are described below: @@ -6903,6 +6905,8 @@ def __init__(__self__, *, """ if acls is not None: pulumi.set(__self__, "acls", acls) + if opensearch_acls is not None: + pulumi.set(__self__, "opensearch_acls", opensearch_acls) @property @pulumi.getter @@ -6918,6 +6922,15 @@ def acls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DatabaseUserSetti def acls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DatabaseUserSettingAclArgs']]]]): pulumi.set(self, "acls", value) + @property + @pulumi.getter(name="opensearchAcls") + def opensearch_acls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DatabaseUserSettingOpensearchAclArgs']]]]: + return pulumi.get(self, "opensearch_acls") + + @opensearch_acls.setter + def opensearch_acls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DatabaseUserSettingOpensearchAclArgs']]]]): + pulumi.set(self, "opensearch_acls", value) + @pulumi.input_type class DatabaseUserSettingAclArgs: @@ -6972,6 +6985,39 @@ def id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "id", value) +@pulumi.input_type +class DatabaseUserSettingOpensearchAclArgs: + def __init__(__self__, *, + index: pulumi.Input[str], + permission: pulumi.Input[str]): + """ + :param pulumi.Input[str] permission: The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + """ + pulumi.set(__self__, "index", index) + pulumi.set(__self__, "permission", permission) + + @property + @pulumi.getter + def index(self) -> pulumi.Input[str]: + return pulumi.get(self, "index") + + @index.setter + def index(self, value: pulumi.Input[str]): + pulumi.set(self, "index", value) + + @property + @pulumi.getter + def permission(self) -> pulumi.Input[str]: + """ + The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + """ + return pulumi.get(self, "permission") + + @permission.setter + def permission(self, value: pulumi.Input[str]): + pulumi.set(self, "permission", value) + + @pulumi.input_type class FirewallInboundRuleArgs: def __init__(__self__, *, diff --git a/sdk/python/pulumi_digitalocean/database_kafka_config.py b/sdk/python/pulumi_digitalocean/database_kafka_config.py new file mode 100644 index 00000000..2656ea4f --- /dev/null +++ b/sdk/python/pulumi_digitalocean/database_kafka_config.py @@ -0,0 +1,1035 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from . 
import _utilities + +__all__ = ['DatabaseKafkaConfigArgs', 'DatabaseKafkaConfig'] + +@pulumi.input_type +class DatabaseKafkaConfigArgs: + def __init__(__self__, *, + cluster_id: pulumi.Input[str], + auto_create_topics_enable: Optional[pulumi.Input[bool]] = None, + group_initial_rebalance_delay_ms: Optional[pulumi.Input[int]] = None, + group_max_session_timeout_ms: Optional[pulumi.Input[int]] = None, + group_min_session_timeout_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_delete_retention_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_min_compaction_lag_ms: Optional[pulumi.Input[str]] = None, + log_flush_interval_ms: Optional[pulumi.Input[str]] = None, + log_index_interval_bytes: Optional[pulumi.Input[int]] = None, + log_message_downconversion_enable: Optional[pulumi.Input[bool]] = None, + log_message_timestamp_difference_max_ms: Optional[pulumi.Input[str]] = None, + log_preallocate: Optional[pulumi.Input[bool]] = None, + log_retention_bytes: Optional[pulumi.Input[str]] = None, + log_retention_hours: Optional[pulumi.Input[int]] = None, + log_retention_ms: Optional[pulumi.Input[str]] = None, + log_roll_jitter_ms: Optional[pulumi.Input[str]] = None, + log_segment_delete_delay_ms: Optional[pulumi.Input[int]] = None, + message_max_bytes: Optional[pulumi.Input[int]] = None): + """ + The set of arguments for constructing a DatabaseKafkaConfig resource. + :param pulumi.Input[str] cluster_id: The ID of the target Kafka cluster. + :param pulumi.Input[bool] auto_create_topics_enable: Enable auto creation of topics. + :param pulumi.Input[int] group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + :param pulumi.Input[int] group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + :param pulumi.Input[int] group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + :param pulumi.Input[int] log_cleaner_delete_retention_ms: How long are delete records retained? + :param pulumi.Input[str] log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + :param pulumi.Input[str] log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + :param pulumi.Input[int] log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. + :param pulumi.Input[bool] log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + :param pulumi.Input[str] log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. 
+ :param pulumi.Input[bool] log_preallocate: Controls whether to preallocate a file when creating a new segment. + :param pulumi.Input[str] log_retention_bytes: The maximum size of the log before deleting messages. + :param pulumi.Input[int] log_retention_hours: The number of hours to keep a log file before deleting it. + :param pulumi.Input[str] log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + :param pulumi.Input[str] log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + :param pulumi.Input[int] log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. + :param pulumi.Input[int] message_max_bytes: The maximum size of message that the server can receive. + """ + pulumi.set(__self__, "cluster_id", cluster_id) + if auto_create_topics_enable is not None: + pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable) + if group_initial_rebalance_delay_ms is not None: + pulumi.set(__self__, "group_initial_rebalance_delay_ms", group_initial_rebalance_delay_ms) + if group_max_session_timeout_ms is not None: + pulumi.set(__self__, "group_max_session_timeout_ms", group_max_session_timeout_ms) + if group_min_session_timeout_ms is not None: + pulumi.set(__self__, "group_min_session_timeout_ms", group_min_session_timeout_ms) + if log_cleaner_delete_retention_ms is not None: + pulumi.set(__self__, "log_cleaner_delete_retention_ms", log_cleaner_delete_retention_ms) + if log_cleaner_min_compaction_lag_ms is not None: + pulumi.set(__self__, "log_cleaner_min_compaction_lag_ms", log_cleaner_min_compaction_lag_ms) + if log_flush_interval_ms is not None: + pulumi.set(__self__, "log_flush_interval_ms", log_flush_interval_ms) + if log_index_interval_bytes is not None: + pulumi.set(__self__, "log_index_interval_bytes", log_index_interval_bytes) + if log_message_downconversion_enable is not None: + pulumi.set(__self__, "log_message_downconversion_enable", log_message_downconversion_enable) + if log_message_timestamp_difference_max_ms is not None: + pulumi.set(__self__, "log_message_timestamp_difference_max_ms", log_message_timestamp_difference_max_ms) + if log_preallocate is not None: + pulumi.set(__self__, "log_preallocate", log_preallocate) + if log_retention_bytes is not None: + pulumi.set(__self__, "log_retention_bytes", log_retention_bytes) + if log_retention_hours is not None: + pulumi.set(__self__, "log_retention_hours", log_retention_hours) + if log_retention_ms is not None: + pulumi.set(__self__, "log_retention_ms", log_retention_ms) + if log_roll_jitter_ms is not None: + pulumi.set(__self__, "log_roll_jitter_ms", log_roll_jitter_ms) + if log_segment_delete_delay_ms is not None: + pulumi.set(__self__, "log_segment_delete_delay_ms", log_segment_delete_delay_ms) + if message_max_bytes is not None: + pulumi.set(__self__, "message_max_bytes", message_max_bytes) + + @property + @pulumi.getter(name="clusterId") + def cluster_id(self) -> pulumi.Input[str]: + """ + The ID of the target Kafka cluster. 
+ """ + return pulumi.get(self, "cluster_id") + + @cluster_id.setter + def cluster_id(self, value: pulumi.Input[str]): + pulumi.set(self, "cluster_id", value) + + @property + @pulumi.getter(name="autoCreateTopicsEnable") + def auto_create_topics_enable(self) -> Optional[pulumi.Input[bool]]: + """ + Enable auto creation of topics. + """ + return pulumi.get(self, "auto_create_topics_enable") + + @auto_create_topics_enable.setter + def auto_create_topics_enable(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "auto_create_topics_enable", value) + + @property + @pulumi.getter(name="groupInitialRebalanceDelayMs") + def group_initial_rebalance_delay_ms(self) -> Optional[pulumi.Input[int]]: + """ + The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + """ + return pulumi.get(self, "group_initial_rebalance_delay_ms") + + @group_initial_rebalance_delay_ms.setter + def group_initial_rebalance_delay_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "group_initial_rebalance_delay_ms", value) + + @property + @pulumi.getter(name="groupMaxSessionTimeoutMs") + def group_max_session_timeout_ms(self) -> Optional[pulumi.Input[int]]: + """ + The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + """ + return pulumi.get(self, "group_max_session_timeout_ms") + + @group_max_session_timeout_ms.setter + def group_max_session_timeout_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "group_max_session_timeout_ms", value) + + @property + @pulumi.getter(name="groupMinSessionTimeoutMs") + def group_min_session_timeout_ms(self) -> Optional[pulumi.Input[int]]: + """ + The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + """ + return pulumi.get(self, "group_min_session_timeout_ms") + + @group_min_session_timeout_ms.setter + def group_min_session_timeout_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "group_min_session_timeout_ms", value) + + @property + @pulumi.getter(name="logCleanerDeleteRetentionMs") + def log_cleaner_delete_retention_ms(self) -> Optional[pulumi.Input[int]]: + """ + How long are delete records retained? + """ + return pulumi.get(self, "log_cleaner_delete_retention_ms") + + @log_cleaner_delete_retention_ms.setter + def log_cleaner_delete_retention_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "log_cleaner_delete_retention_ms", value) + + @property + @pulumi.getter(name="logCleanerMinCompactionLagMs") + def log_cleaner_min_compaction_lag_ms(self) -> Optional[pulumi.Input[str]]: + """ + The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. 
+ """ + return pulumi.get(self, "log_cleaner_min_compaction_lag_ms") + + @log_cleaner_min_compaction_lag_ms.setter + def log_cleaner_min_compaction_lag_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_cleaner_min_compaction_lag_ms", value) + + @property + @pulumi.getter(name="logFlushIntervalMs") + def log_flush_interval_ms(self) -> Optional[pulumi.Input[str]]: + """ + The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + """ + return pulumi.get(self, "log_flush_interval_ms") + + @log_flush_interval_ms.setter + def log_flush_interval_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_flush_interval_ms", value) + + @property + @pulumi.getter(name="logIndexIntervalBytes") + def log_index_interval_bytes(self) -> Optional[pulumi.Input[int]]: + """ + The interval with which Kafka adds an entry to the offset index. + """ + return pulumi.get(self, "log_index_interval_bytes") + + @log_index_interval_bytes.setter + def log_index_interval_bytes(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "log_index_interval_bytes", value) + + @property + @pulumi.getter(name="logMessageDownconversionEnable") + def log_message_downconversion_enable(self) -> Optional[pulumi.Input[bool]]: + """ + This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + """ + return pulumi.get(self, "log_message_downconversion_enable") + + @log_message_downconversion_enable.setter + def log_message_downconversion_enable(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "log_message_downconversion_enable", value) + + @property + @pulumi.getter(name="logMessageTimestampDifferenceMaxMs") + def log_message_timestamp_difference_max_ms(self) -> Optional[pulumi.Input[str]]: + """ + The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + """ + return pulumi.get(self, "log_message_timestamp_difference_max_ms") + + @log_message_timestamp_difference_max_ms.setter + def log_message_timestamp_difference_max_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_message_timestamp_difference_max_ms", value) + + @property + @pulumi.getter(name="logPreallocate") + def log_preallocate(self) -> Optional[pulumi.Input[bool]]: + """ + Controls whether to preallocate a file when creating a new segment. + """ + return pulumi.get(self, "log_preallocate") + + @log_preallocate.setter + def log_preallocate(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "log_preallocate", value) + + @property + @pulumi.getter(name="logRetentionBytes") + def log_retention_bytes(self) -> Optional[pulumi.Input[str]]: + """ + The maximum size of the log before deleting messages. + """ + return pulumi.get(self, "log_retention_bytes") + + @log_retention_bytes.setter + def log_retention_bytes(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_retention_bytes", value) + + @property + @pulumi.getter(name="logRetentionHours") + def log_retention_hours(self) -> Optional[pulumi.Input[int]]: + """ + The number of hours to keep a log file before deleting it. 
+ """ + return pulumi.get(self, "log_retention_hours") + + @log_retention_hours.setter + def log_retention_hours(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "log_retention_hours", value) + + @property + @pulumi.getter(name="logRetentionMs") + def log_retention_ms(self) -> Optional[pulumi.Input[str]]: + """ + The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + """ + return pulumi.get(self, "log_retention_ms") + + @log_retention_ms.setter + def log_retention_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_retention_ms", value) + + @property + @pulumi.getter(name="logRollJitterMs") + def log_roll_jitter_ms(self) -> Optional[pulumi.Input[str]]: + """ + The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + """ + return pulumi.get(self, "log_roll_jitter_ms") + + @log_roll_jitter_ms.setter + def log_roll_jitter_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_roll_jitter_ms", value) + + @property + @pulumi.getter(name="logSegmentDeleteDelayMs") + def log_segment_delete_delay_ms(self) -> Optional[pulumi.Input[int]]: + """ + The amount of time to wait before deleting a file from the filesystem. + """ + return pulumi.get(self, "log_segment_delete_delay_ms") + + @log_segment_delete_delay_ms.setter + def log_segment_delete_delay_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "log_segment_delete_delay_ms", value) + + @property + @pulumi.getter(name="messageMaxBytes") + def message_max_bytes(self) -> Optional[pulumi.Input[int]]: + """ + The maximum size of message that the server can receive. + """ + return pulumi.get(self, "message_max_bytes") + + @message_max_bytes.setter + def message_max_bytes(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "message_max_bytes", value) + + +@pulumi.input_type +class _DatabaseKafkaConfigState: + def __init__(__self__, *, + auto_create_topics_enable: Optional[pulumi.Input[bool]] = None, + cluster_id: Optional[pulumi.Input[str]] = None, + group_initial_rebalance_delay_ms: Optional[pulumi.Input[int]] = None, + group_max_session_timeout_ms: Optional[pulumi.Input[int]] = None, + group_min_session_timeout_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_delete_retention_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_min_compaction_lag_ms: Optional[pulumi.Input[str]] = None, + log_flush_interval_ms: Optional[pulumi.Input[str]] = None, + log_index_interval_bytes: Optional[pulumi.Input[int]] = None, + log_message_downconversion_enable: Optional[pulumi.Input[bool]] = None, + log_message_timestamp_difference_max_ms: Optional[pulumi.Input[str]] = None, + log_preallocate: Optional[pulumi.Input[bool]] = None, + log_retention_bytes: Optional[pulumi.Input[str]] = None, + log_retention_hours: Optional[pulumi.Input[int]] = None, + log_retention_ms: Optional[pulumi.Input[str]] = None, + log_roll_jitter_ms: Optional[pulumi.Input[str]] = None, + log_segment_delete_delay_ms: Optional[pulumi.Input[int]] = None, + message_max_bytes: Optional[pulumi.Input[int]] = None): + """ + Input properties used for looking up and filtering DatabaseKafkaConfig resources. + :param pulumi.Input[bool] auto_create_topics_enable: Enable auto creation of topics. + :param pulumi.Input[str] cluster_id: The ID of the target Kafka cluster. 
+ :param pulumi.Input[int] group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + :param pulumi.Input[int] group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + :param pulumi.Input[int] group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + :param pulumi.Input[int] log_cleaner_delete_retention_ms: How long are delete records retained? + :param pulumi.Input[str] log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + :param pulumi.Input[str] log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + :param pulumi.Input[int] log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. + :param pulumi.Input[bool] log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + :param pulumi.Input[str] log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + :param pulumi.Input[bool] log_preallocate: Controls whether to preallocate a file when creating a new segment. + :param pulumi.Input[str] log_retention_bytes: The maximum size of the log before deleting messages. + :param pulumi.Input[int] log_retention_hours: The number of hours to keep a log file before deleting it. + :param pulumi.Input[str] log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + :param pulumi.Input[str] log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + :param pulumi.Input[int] log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. + :param pulumi.Input[int] message_max_bytes: The maximum size of message that the server can receive. 
+ """ + if auto_create_topics_enable is not None: + pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable) + if cluster_id is not None: + pulumi.set(__self__, "cluster_id", cluster_id) + if group_initial_rebalance_delay_ms is not None: + pulumi.set(__self__, "group_initial_rebalance_delay_ms", group_initial_rebalance_delay_ms) + if group_max_session_timeout_ms is not None: + pulumi.set(__self__, "group_max_session_timeout_ms", group_max_session_timeout_ms) + if group_min_session_timeout_ms is not None: + pulumi.set(__self__, "group_min_session_timeout_ms", group_min_session_timeout_ms) + if log_cleaner_delete_retention_ms is not None: + pulumi.set(__self__, "log_cleaner_delete_retention_ms", log_cleaner_delete_retention_ms) + if log_cleaner_min_compaction_lag_ms is not None: + pulumi.set(__self__, "log_cleaner_min_compaction_lag_ms", log_cleaner_min_compaction_lag_ms) + if log_flush_interval_ms is not None: + pulumi.set(__self__, "log_flush_interval_ms", log_flush_interval_ms) + if log_index_interval_bytes is not None: + pulumi.set(__self__, "log_index_interval_bytes", log_index_interval_bytes) + if log_message_downconversion_enable is not None: + pulumi.set(__self__, "log_message_downconversion_enable", log_message_downconversion_enable) + if log_message_timestamp_difference_max_ms is not None: + pulumi.set(__self__, "log_message_timestamp_difference_max_ms", log_message_timestamp_difference_max_ms) + if log_preallocate is not None: + pulumi.set(__self__, "log_preallocate", log_preallocate) + if log_retention_bytes is not None: + pulumi.set(__self__, "log_retention_bytes", log_retention_bytes) + if log_retention_hours is not None: + pulumi.set(__self__, "log_retention_hours", log_retention_hours) + if log_retention_ms is not None: + pulumi.set(__self__, "log_retention_ms", log_retention_ms) + if log_roll_jitter_ms is not None: + pulumi.set(__self__, "log_roll_jitter_ms", log_roll_jitter_ms) + if log_segment_delete_delay_ms is not None: + pulumi.set(__self__, "log_segment_delete_delay_ms", log_segment_delete_delay_ms) + if message_max_bytes is not None: + pulumi.set(__self__, "message_max_bytes", message_max_bytes) + + @property + @pulumi.getter(name="autoCreateTopicsEnable") + def auto_create_topics_enable(self) -> Optional[pulumi.Input[bool]]: + """ + Enable auto creation of topics. + """ + return pulumi.get(self, "auto_create_topics_enable") + + @auto_create_topics_enable.setter + def auto_create_topics_enable(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "auto_create_topics_enable", value) + + @property + @pulumi.getter(name="clusterId") + def cluster_id(self) -> Optional[pulumi.Input[str]]: + """ + The ID of the target Kafka cluster. + """ + return pulumi.get(self, "cluster_id") + + @cluster_id.setter + def cluster_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "cluster_id", value) + + @property + @pulumi.getter(name="groupInitialRebalanceDelayMs") + def group_initial_rebalance_delay_ms(self) -> Optional[pulumi.Input[int]]: + """ + The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. 
+ """ + return pulumi.get(self, "group_initial_rebalance_delay_ms") + + @group_initial_rebalance_delay_ms.setter + def group_initial_rebalance_delay_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "group_initial_rebalance_delay_ms", value) + + @property + @pulumi.getter(name="groupMaxSessionTimeoutMs") + def group_max_session_timeout_ms(self) -> Optional[pulumi.Input[int]]: + """ + The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + """ + return pulumi.get(self, "group_max_session_timeout_ms") + + @group_max_session_timeout_ms.setter + def group_max_session_timeout_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "group_max_session_timeout_ms", value) + + @property + @pulumi.getter(name="groupMinSessionTimeoutMs") + def group_min_session_timeout_ms(self) -> Optional[pulumi.Input[int]]: + """ + The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + """ + return pulumi.get(self, "group_min_session_timeout_ms") + + @group_min_session_timeout_ms.setter + def group_min_session_timeout_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "group_min_session_timeout_ms", value) + + @property + @pulumi.getter(name="logCleanerDeleteRetentionMs") + def log_cleaner_delete_retention_ms(self) -> Optional[pulumi.Input[int]]: + """ + How long are delete records retained? + """ + return pulumi.get(self, "log_cleaner_delete_retention_ms") + + @log_cleaner_delete_retention_ms.setter + def log_cleaner_delete_retention_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "log_cleaner_delete_retention_ms", value) + + @property + @pulumi.getter(name="logCleanerMinCompactionLagMs") + def log_cleaner_min_compaction_lag_ms(self) -> Optional[pulumi.Input[str]]: + """ + The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + """ + return pulumi.get(self, "log_cleaner_min_compaction_lag_ms") + + @log_cleaner_min_compaction_lag_ms.setter + def log_cleaner_min_compaction_lag_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_cleaner_min_compaction_lag_ms", value) + + @property + @pulumi.getter(name="logFlushIntervalMs") + def log_flush_interval_ms(self) -> Optional[pulumi.Input[str]]: + """ + The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + """ + return pulumi.get(self, "log_flush_interval_ms") + + @log_flush_interval_ms.setter + def log_flush_interval_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_flush_interval_ms", value) + + @property + @pulumi.getter(name="logIndexIntervalBytes") + def log_index_interval_bytes(self) -> Optional[pulumi.Input[int]]: + """ + The interval with which Kafka adds an entry to the offset index. 
+ """ + return pulumi.get(self, "log_index_interval_bytes") + + @log_index_interval_bytes.setter + def log_index_interval_bytes(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "log_index_interval_bytes", value) + + @property + @pulumi.getter(name="logMessageDownconversionEnable") + def log_message_downconversion_enable(self) -> Optional[pulumi.Input[bool]]: + """ + This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + """ + return pulumi.get(self, "log_message_downconversion_enable") + + @log_message_downconversion_enable.setter + def log_message_downconversion_enable(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "log_message_downconversion_enable", value) + + @property + @pulumi.getter(name="logMessageTimestampDifferenceMaxMs") + def log_message_timestamp_difference_max_ms(self) -> Optional[pulumi.Input[str]]: + """ + The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + """ + return pulumi.get(self, "log_message_timestamp_difference_max_ms") + + @log_message_timestamp_difference_max_ms.setter + def log_message_timestamp_difference_max_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_message_timestamp_difference_max_ms", value) + + @property + @pulumi.getter(name="logPreallocate") + def log_preallocate(self) -> Optional[pulumi.Input[bool]]: + """ + Controls whether to preallocate a file when creating a new segment. + """ + return pulumi.get(self, "log_preallocate") + + @log_preallocate.setter + def log_preallocate(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "log_preallocate", value) + + @property + @pulumi.getter(name="logRetentionBytes") + def log_retention_bytes(self) -> Optional[pulumi.Input[str]]: + """ + The maximum size of the log before deleting messages. + """ + return pulumi.get(self, "log_retention_bytes") + + @log_retention_bytes.setter + def log_retention_bytes(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_retention_bytes", value) + + @property + @pulumi.getter(name="logRetentionHours") + def log_retention_hours(self) -> Optional[pulumi.Input[int]]: + """ + The number of hours to keep a log file before deleting it. + """ + return pulumi.get(self, "log_retention_hours") + + @log_retention_hours.setter + def log_retention_hours(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "log_retention_hours", value) + + @property + @pulumi.getter(name="logRetentionMs") + def log_retention_ms(self) -> Optional[pulumi.Input[str]]: + """ + The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + """ + return pulumi.get(self, "log_retention_ms") + + @log_retention_ms.setter + def log_retention_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_retention_ms", value) + + @property + @pulumi.getter(name="logRollJitterMs") + def log_roll_jitter_ms(self) -> Optional[pulumi.Input[str]]: + """ + The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. 
+ """ + return pulumi.get(self, "log_roll_jitter_ms") + + @log_roll_jitter_ms.setter + def log_roll_jitter_ms(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "log_roll_jitter_ms", value) + + @property + @pulumi.getter(name="logSegmentDeleteDelayMs") + def log_segment_delete_delay_ms(self) -> Optional[pulumi.Input[int]]: + """ + The amount of time to wait before deleting a file from the filesystem. + """ + return pulumi.get(self, "log_segment_delete_delay_ms") + + @log_segment_delete_delay_ms.setter + def log_segment_delete_delay_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "log_segment_delete_delay_ms", value) + + @property + @pulumi.getter(name="messageMaxBytes") + def message_max_bytes(self) -> Optional[pulumi.Input[int]]: + """ + The maximum size of message that the server can receive. + """ + return pulumi.get(self, "message_max_bytes") + + @message_max_bytes.setter + def message_max_bytes(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "message_max_bytes", value) + + +class DatabaseKafkaConfig(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + auto_create_topics_enable: Optional[pulumi.Input[bool]] = None, + cluster_id: Optional[pulumi.Input[str]] = None, + group_initial_rebalance_delay_ms: Optional[pulumi.Input[int]] = None, + group_max_session_timeout_ms: Optional[pulumi.Input[int]] = None, + group_min_session_timeout_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_delete_retention_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_min_compaction_lag_ms: Optional[pulumi.Input[str]] = None, + log_flush_interval_ms: Optional[pulumi.Input[str]] = None, + log_index_interval_bytes: Optional[pulumi.Input[int]] = None, + log_message_downconversion_enable: Optional[pulumi.Input[bool]] = None, + log_message_timestamp_difference_max_ms: Optional[pulumi.Input[str]] = None, + log_preallocate: Optional[pulumi.Input[bool]] = None, + log_retention_bytes: Optional[pulumi.Input[str]] = None, + log_retention_hours: Optional[pulumi.Input[int]] = None, + log_retention_ms: Optional[pulumi.Input[str]] = None, + log_roll_jitter_ms: Optional[pulumi.Input[str]] = None, + log_segment_delete_delay_ms: Optional[pulumi.Input[int]] = None, + message_max_bytes: Optional[pulumi.Input[int]] = None, + __props__=None): + """ + Provides a virtual resource that can be used to change advanced configuration + options for a DigitalOcean managed Kafka database cluster. + + > **Note** Kafka configurations are only removed from state when destroyed. The remote configuration is not unset. 
+ + ## Example Usage + + ```python + import pulumi + import pulumi_digitalocean as digitalocean + + example_database_cluster = digitalocean.DatabaseCluster("example", + name="example-kafka-cluster", + engine="kafka", + version="3.7", + size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB, + region=digitalocean.Region.NYC3, + node_count=3) + example = digitalocean.DatabaseKafkaConfig("example", + cluster_id=example_database_cluster.id, + group_initial_rebalance_delay_ms=3000, + group_min_session_timeout_ms=6000, + group_max_session_timeout_ms=1800000, + message_max_bytes=1048588, + log_cleaner_delete_retention_ms=86400000, + log_cleaner_min_compaction_lag_ms="0", + log_flush_interval_ms="9223372036854775807", + log_index_interval_bytes=4096, + log_message_downconversion_enable=True, + log_message_timestamp_difference_max_ms="9223372036854775807", + log_preallocate=False, + log_retention_bytes="-1", + log_retention_hours=168, + log_retention_ms="604800000", + log_roll_jitter_ms="0", + log_segment_delete_delay_ms=60000, + auto_create_topics_enable=True) + ``` + + ## Import + + A Kafka database cluster's configuration can be imported using the `id` the parent cluster, e.g. + + ```sh + $ pulumi import digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + ``` + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[bool] auto_create_topics_enable: Enable auto creation of topics. + :param pulumi.Input[str] cluster_id: The ID of the target Kafka cluster. + :param pulumi.Input[int] group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + :param pulumi.Input[int] group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + :param pulumi.Input[int] group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + :param pulumi.Input[int] log_cleaner_delete_retention_ms: How long are delete records retained? + :param pulumi.Input[str] log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + :param pulumi.Input[str] log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + :param pulumi.Input[int] log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. + :param pulumi.Input[bool] log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + :param pulumi.Input[str] log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. 
+ :param pulumi.Input[bool] log_preallocate: Controls whether to preallocate a file when creating a new segment. + :param pulumi.Input[str] log_retention_bytes: The maximum size of the log before deleting messages. + :param pulumi.Input[int] log_retention_hours: The number of hours to keep a log file before deleting it. + :param pulumi.Input[str] log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + :param pulumi.Input[str] log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + :param pulumi.Input[int] log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. + :param pulumi.Input[int] message_max_bytes: The maximum size of message that the server can receive. + """ + ... + @overload + def __init__(__self__, + resource_name: str, + args: DatabaseKafkaConfigArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + Provides a virtual resource that can be used to change advanced configuration + options for a DigitalOcean managed Kafka database cluster. + + > **Note** Kafka configurations are only removed from state when destroyed. The remote configuration is not unset. + + ## Example Usage + + ```python + import pulumi + import pulumi_digitalocean as digitalocean + + example_database_cluster = digitalocean.DatabaseCluster("example", + name="example-kafka-cluster", + engine="kafka", + version="3.7", + size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB, + region=digitalocean.Region.NYC3, + node_count=3) + example = digitalocean.DatabaseKafkaConfig("example", + cluster_id=example_database_cluster.id, + group_initial_rebalance_delay_ms=3000, + group_min_session_timeout_ms=6000, + group_max_session_timeout_ms=1800000, + message_max_bytes=1048588, + log_cleaner_delete_retention_ms=86400000, + log_cleaner_min_compaction_lag_ms="0", + log_flush_interval_ms="9223372036854775807", + log_index_interval_bytes=4096, + log_message_downconversion_enable=True, + log_message_timestamp_difference_max_ms="9223372036854775807", + log_preallocate=False, + log_retention_bytes="-1", + log_retention_hours=168, + log_retention_ms="604800000", + log_roll_jitter_ms="0", + log_segment_delete_delay_ms=60000, + auto_create_topics_enable=True) + ``` + + ## Import + + A Kafka database cluster's configuration can be imported using the `id` the parent cluster, e.g. + + ```sh + $ pulumi import digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + ``` + + :param str resource_name: The name of the resource. + :param DatabaseKafkaConfigArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... 
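To make the overloaded constructor above concrete, both call forms below should be equivalent: the first is matched by the keyword-argument overload, the second by the explicit args-object overload. The cluster values are copied from the example usage in the docstring, and only one of the two config resources would exist in a real program.

```python
import pulumi_digitalocean as digitalocean

# Kafka cluster reused from the example usage above (values copied from that example).
example_database_cluster = digitalocean.DatabaseCluster(
    "example",
    name="example-kafka-cluster",
    engine="kafka",
    version="3.7",
    size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB,
    region=digitalocean.Region.NYC3,
    node_count=3,
)

# Keyword-argument form, matched by the first overload:
config_from_kwargs = digitalocean.DatabaseKafkaConfig(
    "example-kwargs",
    cluster_id=example_database_cluster.id,
    message_max_bytes=1048588,
)

# Equivalent explicit args-object form, matched by the second overload:
config_from_args = digitalocean.DatabaseKafkaConfig(
    "example-args",
    digitalocean.DatabaseKafkaConfigArgs(
        cluster_id=example_database_cluster.id,
        message_max_bytes=1048588,
    ),
)
```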
+ def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(DatabaseKafkaConfigArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + auto_create_topics_enable: Optional[pulumi.Input[bool]] = None, + cluster_id: Optional[pulumi.Input[str]] = None, + group_initial_rebalance_delay_ms: Optional[pulumi.Input[int]] = None, + group_max_session_timeout_ms: Optional[pulumi.Input[int]] = None, + group_min_session_timeout_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_delete_retention_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_min_compaction_lag_ms: Optional[pulumi.Input[str]] = None, + log_flush_interval_ms: Optional[pulumi.Input[str]] = None, + log_index_interval_bytes: Optional[pulumi.Input[int]] = None, + log_message_downconversion_enable: Optional[pulumi.Input[bool]] = None, + log_message_timestamp_difference_max_ms: Optional[pulumi.Input[str]] = None, + log_preallocate: Optional[pulumi.Input[bool]] = None, + log_retention_bytes: Optional[pulumi.Input[str]] = None, + log_retention_hours: Optional[pulumi.Input[int]] = None, + log_retention_ms: Optional[pulumi.Input[str]] = None, + log_roll_jitter_ms: Optional[pulumi.Input[str]] = None, + log_segment_delete_delay_ms: Optional[pulumi.Input[int]] = None, + message_max_bytes: Optional[pulumi.Input[int]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = DatabaseKafkaConfigArgs.__new__(DatabaseKafkaConfigArgs) + + __props__.__dict__["auto_create_topics_enable"] = auto_create_topics_enable + if cluster_id is None and not opts.urn: + raise TypeError("Missing required property 'cluster_id'") + __props__.__dict__["cluster_id"] = cluster_id + __props__.__dict__["group_initial_rebalance_delay_ms"] = group_initial_rebalance_delay_ms + __props__.__dict__["group_max_session_timeout_ms"] = group_max_session_timeout_ms + __props__.__dict__["group_min_session_timeout_ms"] = group_min_session_timeout_ms + __props__.__dict__["log_cleaner_delete_retention_ms"] = log_cleaner_delete_retention_ms + __props__.__dict__["log_cleaner_min_compaction_lag_ms"] = log_cleaner_min_compaction_lag_ms + __props__.__dict__["log_flush_interval_ms"] = log_flush_interval_ms + __props__.__dict__["log_index_interval_bytes"] = log_index_interval_bytes + __props__.__dict__["log_message_downconversion_enable"] = log_message_downconversion_enable + __props__.__dict__["log_message_timestamp_difference_max_ms"] = log_message_timestamp_difference_max_ms + __props__.__dict__["log_preallocate"] = log_preallocate + __props__.__dict__["log_retention_bytes"] = log_retention_bytes + __props__.__dict__["log_retention_hours"] = log_retention_hours + __props__.__dict__["log_retention_ms"] = log_retention_ms + __props__.__dict__["log_roll_jitter_ms"] = log_roll_jitter_ms + __props__.__dict__["log_segment_delete_delay_ms"] = log_segment_delete_delay_ms + __props__.__dict__["message_max_bytes"] 
= message_max_bytes + super(DatabaseKafkaConfig, __self__).__init__( + 'digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None, + auto_create_topics_enable: Optional[pulumi.Input[bool]] = None, + cluster_id: Optional[pulumi.Input[str]] = None, + group_initial_rebalance_delay_ms: Optional[pulumi.Input[int]] = None, + group_max_session_timeout_ms: Optional[pulumi.Input[int]] = None, + group_min_session_timeout_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_delete_retention_ms: Optional[pulumi.Input[int]] = None, + log_cleaner_min_compaction_lag_ms: Optional[pulumi.Input[str]] = None, + log_flush_interval_ms: Optional[pulumi.Input[str]] = None, + log_index_interval_bytes: Optional[pulumi.Input[int]] = None, + log_message_downconversion_enable: Optional[pulumi.Input[bool]] = None, + log_message_timestamp_difference_max_ms: Optional[pulumi.Input[str]] = None, + log_preallocate: Optional[pulumi.Input[bool]] = None, + log_retention_bytes: Optional[pulumi.Input[str]] = None, + log_retention_hours: Optional[pulumi.Input[int]] = None, + log_retention_ms: Optional[pulumi.Input[str]] = None, + log_roll_jitter_ms: Optional[pulumi.Input[str]] = None, + log_segment_delete_delay_ms: Optional[pulumi.Input[int]] = None, + message_max_bytes: Optional[pulumi.Input[int]] = None) -> 'DatabaseKafkaConfig': + """ + Get an existing DatabaseKafkaConfig resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[bool] auto_create_topics_enable: Enable auto creation of topics. + :param pulumi.Input[str] cluster_id: The ID of the target Kafka cluster. + :param pulumi.Input[int] group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + :param pulumi.Input[int] group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + :param pulumi.Input[int] group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + :param pulumi.Input[int] log_cleaner_delete_retention_ms: How long are delete records retained? + :param pulumi.Input[str] log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + :param pulumi.Input[str] log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. 
+ :param pulumi.Input[int] log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. + :param pulumi.Input[bool] log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + :param pulumi.Input[str] log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + :param pulumi.Input[bool] log_preallocate: Controls whether to preallocate a file when creating a new segment. + :param pulumi.Input[str] log_retention_bytes: The maximum size of the log before deleting messages. + :param pulumi.Input[int] log_retention_hours: The number of hours to keep a log file before deleting it. + :param pulumi.Input[str] log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + :param pulumi.Input[str] log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + :param pulumi.Input[int] log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. + :param pulumi.Input[int] message_max_bytes: The maximum size of message that the server can receive. + """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = _DatabaseKafkaConfigState.__new__(_DatabaseKafkaConfigState) + + __props__.__dict__["auto_create_topics_enable"] = auto_create_topics_enable + __props__.__dict__["cluster_id"] = cluster_id + __props__.__dict__["group_initial_rebalance_delay_ms"] = group_initial_rebalance_delay_ms + __props__.__dict__["group_max_session_timeout_ms"] = group_max_session_timeout_ms + __props__.__dict__["group_min_session_timeout_ms"] = group_min_session_timeout_ms + __props__.__dict__["log_cleaner_delete_retention_ms"] = log_cleaner_delete_retention_ms + __props__.__dict__["log_cleaner_min_compaction_lag_ms"] = log_cleaner_min_compaction_lag_ms + __props__.__dict__["log_flush_interval_ms"] = log_flush_interval_ms + __props__.__dict__["log_index_interval_bytes"] = log_index_interval_bytes + __props__.__dict__["log_message_downconversion_enable"] = log_message_downconversion_enable + __props__.__dict__["log_message_timestamp_difference_max_ms"] = log_message_timestamp_difference_max_ms + __props__.__dict__["log_preallocate"] = log_preallocate + __props__.__dict__["log_retention_bytes"] = log_retention_bytes + __props__.__dict__["log_retention_hours"] = log_retention_hours + __props__.__dict__["log_retention_ms"] = log_retention_ms + __props__.__dict__["log_roll_jitter_ms"] = log_roll_jitter_ms + __props__.__dict__["log_segment_delete_delay_ms"] = log_segment_delete_delay_ms + __props__.__dict__["message_max_bytes"] = message_max_bytes + return DatabaseKafkaConfig(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter(name="autoCreateTopicsEnable") + def auto_create_topics_enable(self) -> pulumi.Output[bool]: + """ + Enable auto creation of topics. + """ + return pulumi.get(self, "auto_create_topics_enable") + + @property + @pulumi.getter(name="clusterId") + def cluster_id(self) -> pulumi.Output[str]: + """ + The ID of the target Kafka cluster. 
+ """ + return pulumi.get(self, "cluster_id") + + @property + @pulumi.getter(name="groupInitialRebalanceDelayMs") + def group_initial_rebalance_delay_ms(self) -> pulumi.Output[int]: + """ + The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. + """ + return pulumi.get(self, "group_initial_rebalance_delay_ms") + + @property + @pulumi.getter(name="groupMaxSessionTimeoutMs") + def group_max_session_timeout_ms(self) -> pulumi.Output[int]: + """ + The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + """ + return pulumi.get(self, "group_max_session_timeout_ms") + + @property + @pulumi.getter(name="groupMinSessionTimeoutMs") + def group_min_session_timeout_ms(self) -> pulumi.Output[int]: + """ + The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + """ + return pulumi.get(self, "group_min_session_timeout_ms") + + @property + @pulumi.getter(name="logCleanerDeleteRetentionMs") + def log_cleaner_delete_retention_ms(self) -> pulumi.Output[int]: + """ + How long are delete records retained? + """ + return pulumi.get(self, "log_cleaner_delete_retention_ms") + + @property + @pulumi.getter(name="logCleanerMinCompactionLagMs") + def log_cleaner_min_compaction_lag_ms(self) -> pulumi.Output[str]: + """ + The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. + """ + return pulumi.get(self, "log_cleaner_min_compaction_lag_ms") + + @property + @pulumi.getter(name="logFlushIntervalMs") + def log_flush_interval_ms(self) -> pulumi.Output[str]: + """ + The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. + """ + return pulumi.get(self, "log_flush_interval_ms") + + @property + @pulumi.getter(name="logIndexIntervalBytes") + def log_index_interval_bytes(self) -> pulumi.Output[int]: + """ + The interval with which Kafka adds an entry to the offset index. + """ + return pulumi.get(self, "log_index_interval_bytes") + + @property + @pulumi.getter(name="logMessageDownconversionEnable") + def log_message_downconversion_enable(self) -> pulumi.Output[bool]: + """ + This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + """ + return pulumi.get(self, "log_message_downconversion_enable") + + @property + @pulumi.getter(name="logMessageTimestampDifferenceMaxMs") + def log_message_timestamp_difference_max_ms(self) -> pulumi.Output[str]: + """ + The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. + """ + return pulumi.get(self, "log_message_timestamp_difference_max_ms") + + @property + @pulumi.getter(name="logPreallocate") + def log_preallocate(self) -> pulumi.Output[bool]: + """ + Controls whether to preallocate a file when creating a new segment. 
+ """ + return pulumi.get(self, "log_preallocate") + + @property + @pulumi.getter(name="logRetentionBytes") + def log_retention_bytes(self) -> pulumi.Output[str]: + """ + The maximum size of the log before deleting messages. + """ + return pulumi.get(self, "log_retention_bytes") + + @property + @pulumi.getter(name="logRetentionHours") + def log_retention_hours(self) -> pulumi.Output[int]: + """ + The number of hours to keep a log file before deleting it. + """ + return pulumi.get(self, "log_retention_hours") + + @property + @pulumi.getter(name="logRetentionMs") + def log_retention_ms(self) -> pulumi.Output[str]: + """ + The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. + """ + return pulumi.get(self, "log_retention_ms") + + @property + @pulumi.getter(name="logRollJitterMs") + def log_roll_jitter_ms(self) -> pulumi.Output[str]: + """ + The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used. + """ + return pulumi.get(self, "log_roll_jitter_ms") + + @property + @pulumi.getter(name="logSegmentDeleteDelayMs") + def log_segment_delete_delay_ms(self) -> pulumi.Output[int]: + """ + The amount of time to wait before deleting a file from the filesystem. + """ + return pulumi.get(self, "log_segment_delete_delay_ms") + + @property + @pulumi.getter(name="messageMaxBytes") + def message_max_bytes(self) -> pulumi.Output[int]: + """ + The maximum size of message that the server can receive. + """ + return pulumi.get(self, "message_max_bytes") + diff --git a/sdk/python/pulumi_digitalocean/database_mongodb_config.py b/sdk/python/pulumi_digitalocean/database_mongodb_config.py new file mode 100644 index 00000000..7eb1a2ff --- /dev/null +++ b/sdk/python/pulumi_digitalocean/database_mongodb_config.py @@ -0,0 +1,447 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from . import _utilities + +__all__ = ['DatabaseMongodbConfigArgs', 'DatabaseMongodbConfig'] + +@pulumi.input_type +class DatabaseMongodbConfigArgs: + def __init__(__self__, *, + cluster_id: pulumi.Input[str], + default_read_concern: Optional[pulumi.Input[str]] = None, + default_write_concern: Optional[pulumi.Input[str]] = None, + slow_op_threshold_ms: Optional[pulumi.Input[int]] = None, + transaction_lifetime_limit_seconds: Optional[pulumi.Input[int]] = None, + verbosity: Optional[pulumi.Input[int]] = None): + """ + The set of arguments for constructing a DatabaseMongodbConfig resource. + :param pulumi.Input[str] cluster_id: The ID of the target MongoDB cluster. + :param pulumi.Input[str] default_read_concern: Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + :param pulumi.Input[str] default_write_concern: Describes the level of acknowledgment requested from MongoDB for write operations clusters. 
This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + :param pulumi.Input[int] slow_op_threshold_ms: Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + :param pulumi.Input[int] transaction_lifetime_limit_seconds: Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + :param pulumi.Input[int] verbosity: The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + """ + pulumi.set(__self__, "cluster_id", cluster_id) + if default_read_concern is not None: + pulumi.set(__self__, "default_read_concern", default_read_concern) + if default_write_concern is not None: + pulumi.set(__self__, "default_write_concern", default_write_concern) + if slow_op_threshold_ms is not None: + pulumi.set(__self__, "slow_op_threshold_ms", slow_op_threshold_ms) + if transaction_lifetime_limit_seconds is not None: + pulumi.set(__self__, "transaction_lifetime_limit_seconds", transaction_lifetime_limit_seconds) + if verbosity is not None: + pulumi.set(__self__, "verbosity", verbosity) + + @property + @pulumi.getter(name="clusterId") + def cluster_id(self) -> pulumi.Input[str]: + """ + The ID of the target MongoDB cluster. + """ + return pulumi.get(self, "cluster_id") + + @cluster_id.setter + def cluster_id(self, value: pulumi.Input[str]): + pulumi.set(self, "cluster_id", value) + + @property + @pulumi.getter(name="defaultReadConcern") + def default_read_concern(self) -> Optional[pulumi.Input[str]]: + """ + Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). 
+ """ + return pulumi.get(self, "default_read_concern") + + @default_read_concern.setter + def default_read_concern(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "default_read_concern", value) + + @property + @pulumi.getter(name="defaultWriteConcern") + def default_write_concern(self) -> Optional[pulumi.Input[str]]: + """ + Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + """ + return pulumi.get(self, "default_write_concern") + + @default_write_concern.setter + def default_write_concern(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "default_write_concern", value) + + @property + @pulumi.getter(name="slowOpThresholdMs") + def slow_op_threshold_ms(self) -> Optional[pulumi.Input[int]]: + """ + Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + """ + return pulumi.get(self, "slow_op_threshold_ms") + + @slow_op_threshold_ms.setter + def slow_op_threshold_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "slow_op_threshold_ms", value) + + @property + @pulumi.getter(name="transactionLifetimeLimitSeconds") + def transaction_lifetime_limit_seconds(self) -> Optional[pulumi.Input[int]]: + """ + Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + """ + return pulumi.get(self, "transaction_lifetime_limit_seconds") + + @transaction_lifetime_limit_seconds.setter + def transaction_lifetime_limit_seconds(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "transaction_lifetime_limit_seconds", value) + + @property + @pulumi.getter + def verbosity(self) -> Optional[pulumi.Input[int]]: + """ + The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). 
+ """ + return pulumi.get(self, "verbosity") + + @verbosity.setter + def verbosity(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "verbosity", value) + + +@pulumi.input_type +class _DatabaseMongodbConfigState: + def __init__(__self__, *, + cluster_id: Optional[pulumi.Input[str]] = None, + default_read_concern: Optional[pulumi.Input[str]] = None, + default_write_concern: Optional[pulumi.Input[str]] = None, + slow_op_threshold_ms: Optional[pulumi.Input[int]] = None, + transaction_lifetime_limit_seconds: Optional[pulumi.Input[int]] = None, + verbosity: Optional[pulumi.Input[int]] = None): + """ + Input properties used for looking up and filtering DatabaseMongodbConfig resources. + :param pulumi.Input[str] cluster_id: The ID of the target MongoDB cluster. + :param pulumi.Input[str] default_read_concern: Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + :param pulumi.Input[str] default_write_concern: Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + :param pulumi.Input[int] slow_op_threshold_ms: Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + :param pulumi.Input[int] transaction_lifetime_limit_seconds: Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + :param pulumi.Input[int] verbosity: The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). 
+ """ + if cluster_id is not None: + pulumi.set(__self__, "cluster_id", cluster_id) + if default_read_concern is not None: + pulumi.set(__self__, "default_read_concern", default_read_concern) + if default_write_concern is not None: + pulumi.set(__self__, "default_write_concern", default_write_concern) + if slow_op_threshold_ms is not None: + pulumi.set(__self__, "slow_op_threshold_ms", slow_op_threshold_ms) + if transaction_lifetime_limit_seconds is not None: + pulumi.set(__self__, "transaction_lifetime_limit_seconds", transaction_lifetime_limit_seconds) + if verbosity is not None: + pulumi.set(__self__, "verbosity", verbosity) + + @property + @pulumi.getter(name="clusterId") + def cluster_id(self) -> Optional[pulumi.Input[str]]: + """ + The ID of the target MongoDB cluster. + """ + return pulumi.get(self, "cluster_id") + + @cluster_id.setter + def cluster_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "cluster_id", value) + + @property + @pulumi.getter(name="defaultReadConcern") + def default_read_concern(self) -> Optional[pulumi.Input[str]]: + """ + Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + """ + return pulumi.get(self, "default_read_concern") + + @default_read_concern.setter + def default_read_concern(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "default_read_concern", value) + + @property + @pulumi.getter(name="defaultWriteConcern") + def default_write_concern(self) -> Optional[pulumi.Input[str]]: + """ + Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + """ + return pulumi.get(self, "default_write_concern") + + @default_write_concern.setter + def default_write_concern(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "default_write_concern", value) + + @property + @pulumi.getter(name="slowOpThresholdMs") + def slow_op_threshold_ms(self) -> Optional[pulumi.Input[int]]: + """ + Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + """ + return pulumi.get(self, "slow_op_threshold_ms") + + @slow_op_threshold_ms.setter + def slow_op_threshold_ms(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "slow_op_threshold_ms", value) + + @property + @pulumi.getter(name="transactionLifetimeLimitSeconds") + def transaction_lifetime_limit_seconds(self) -> Optional[pulumi.Input[int]]: + """ + Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. 
Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + """ + return pulumi.get(self, "transaction_lifetime_limit_seconds") + + @transaction_lifetime_limit_seconds.setter + def transaction_lifetime_limit_seconds(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "transaction_lifetime_limit_seconds", value) + + @property + @pulumi.getter + def verbosity(self) -> Optional[pulumi.Input[int]]: + """ + The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + """ + return pulumi.get(self, "verbosity") + + @verbosity.setter + def verbosity(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "verbosity", value) + + +class DatabaseMongodbConfig(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + cluster_id: Optional[pulumi.Input[str]] = None, + default_read_concern: Optional[pulumi.Input[str]] = None, + default_write_concern: Optional[pulumi.Input[str]] = None, + slow_op_threshold_ms: Optional[pulumi.Input[int]] = None, + transaction_lifetime_limit_seconds: Optional[pulumi.Input[int]] = None, + verbosity: Optional[pulumi.Input[int]] = None, + __props__=None): + """ + Provides a virtual resource that can be used to change advanced configuration + options for a DigitalOcean managed MongoDB database cluster. + + > **Note** MongoDB configurations are only removed from state when destroyed. The remote configuration is not unset. + + ## Example Usage + + ```python + import pulumi + import pulumi_digitalocean as digitalocean + + example_database_cluster = digitalocean.DatabaseCluster("example", + name="example-mongodb-cluster", + engine="mongodb", + version="7", + size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB, + region=digitalocean.Region.NYC3, + node_count=1) + example = digitalocean.DatabaseMongodbConfig("example", + cluster_id=example_database_cluster.id, + default_read_concern="majority", + default_write_concern="majority", + transaction_lifetime_limit_seconds=100, + slow_op_threshold_ms=100, + verbosity=3) + ``` + + ## Import + + A MongoDB database cluster's configuration can be imported using the `id` the parent cluster, e.g. + + ```sh + $ pulumi import digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + ``` + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] cluster_id: The ID of the target MongoDB cluster. + :param pulumi.Input[str] default_read_concern: Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + :param pulumi.Input[str] default_write_concern: Describes the level of acknowledgment requested from MongoDB for write operations clusters. 
This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + :param pulumi.Input[int] slow_op_threshold_ms: Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + :param pulumi.Input[int] transaction_lifetime_limit_seconds: Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + :param pulumi.Input[int] verbosity: The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + """ + ... + @overload + def __init__(__self__, + resource_name: str, + args: DatabaseMongodbConfigArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + Provides a virtual resource that can be used to change advanced configuration + options for a DigitalOcean managed MongoDB database cluster. + + > **Note** MongoDB configurations are only removed from state when destroyed. The remote configuration is not unset. + + ## Example Usage + + ```python + import pulumi + import pulumi_digitalocean as digitalocean + + example_database_cluster = digitalocean.DatabaseCluster("example", + name="example-mongodb-cluster", + engine="mongodb", + version="7", + size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB, + region=digitalocean.Region.NYC3, + node_count=1) + example = digitalocean.DatabaseMongodbConfig("example", + cluster_id=example_database_cluster.id, + default_read_concern="majority", + default_write_concern="majority", + transaction_lifetime_limit_seconds=100, + slow_op_threshold_ms=100, + verbosity=3) + ``` + + ## Import + + A MongoDB database cluster's configuration can be imported using the `id` the parent cluster, e.g. + + ```sh + $ pulumi import digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig example 4b62829a-9c42-465b-aaa3-84051048e712 + ``` + + :param str resource_name: The name of the resource. + :param DatabaseMongodbConfigArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... 
+ def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(DatabaseMongodbConfigArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + cluster_id: Optional[pulumi.Input[str]] = None, + default_read_concern: Optional[pulumi.Input[str]] = None, + default_write_concern: Optional[pulumi.Input[str]] = None, + slow_op_threshold_ms: Optional[pulumi.Input[int]] = None, + transaction_lifetime_limit_seconds: Optional[pulumi.Input[int]] = None, + verbosity: Optional[pulumi.Input[int]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = DatabaseMongodbConfigArgs.__new__(DatabaseMongodbConfigArgs) + + if cluster_id is None and not opts.urn: + raise TypeError("Missing required property 'cluster_id'") + __props__.__dict__["cluster_id"] = cluster_id + __props__.__dict__["default_read_concern"] = default_read_concern + __props__.__dict__["default_write_concern"] = default_write_concern + __props__.__dict__["slow_op_threshold_ms"] = slow_op_threshold_ms + __props__.__dict__["transaction_lifetime_limit_seconds"] = transaction_lifetime_limit_seconds + __props__.__dict__["verbosity"] = verbosity + super(DatabaseMongodbConfig, __self__).__init__( + 'digitalocean:index/databaseMongodbConfig:DatabaseMongodbConfig', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None, + cluster_id: Optional[pulumi.Input[str]] = None, + default_read_concern: Optional[pulumi.Input[str]] = None, + default_write_concern: Optional[pulumi.Input[str]] = None, + slow_op_threshold_ms: Optional[pulumi.Input[int]] = None, + transaction_lifetime_limit_seconds: Optional[pulumi.Input[int]] = None, + verbosity: Optional[pulumi.Input[int]] = None) -> 'DatabaseMongodbConfig': + """ + Get an existing DatabaseMongodbConfig resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] cluster_id: The ID of the target MongoDB cluster. + :param pulumi.Input[str] default_read_concern: Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + :param pulumi.Input[str] default_write_concern: Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. 
Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). + :param pulumi.Input[int] slow_op_threshold_ms: Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + :param pulumi.Input[int] transaction_lifetime_limit_seconds: Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + :param pulumi.Input[int] verbosity: The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = _DatabaseMongodbConfigState.__new__(_DatabaseMongodbConfigState) + + __props__.__dict__["cluster_id"] = cluster_id + __props__.__dict__["default_read_concern"] = default_read_concern + __props__.__dict__["default_write_concern"] = default_write_concern + __props__.__dict__["slow_op_threshold_ms"] = slow_op_threshold_ms + __props__.__dict__["transaction_lifetime_limit_seconds"] = transaction_lifetime_limit_seconds + __props__.__dict__["verbosity"] = verbosity + return DatabaseMongodbConfig(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter(name="clusterId") + def cluster_id(self) -> pulumi.Output[str]: + """ + The ID of the target MongoDB cluster. + """ + return pulumi.get(self, "cluster_id") + + @property + @pulumi.getter(name="defaultReadConcern") + def default_read_concern(self) -> pulumi.Output[str]: + """ + Specifies the default consistency behavior of reads from the database. Data that is returned from the query with may or may not have been acknowledged by all nodes in the replicaset depending on this value. Learn more [here](https://www.mongodb.com/docs/manual/reference/read-concern/). + """ + return pulumi.get(self, "default_read_concern") + + @property + @pulumi.getter(name="defaultWriteConcern") + def default_write_concern(self) -> pulumi.Output[str]: + """ + Describes the level of acknowledgment requested from MongoDB for write operations clusters. This field can set to either `majority` or a number`0...n` which will describe the number of nodes that must acknowledge the write operation before it is fully accepted. Setting to `0` will request no acknowledgement of the write operation. Learn more [here](https://www.mongodb.com/docs/manual/reference/write-concern/). 
+ """ + return pulumi.get(self, "default_write_concern") + + @property + @pulumi.getter(name="slowOpThresholdMs") + def slow_op_threshold_ms(self) -> pulumi.Output[int]: + """ + Operations that run for longer than this threshold are considered slow which are then recorded to the diagnostic logs. Higher log levels (verbosity) will record all operations regardless of this threshold on the primary node. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-operationProfiling.slowOpThresholdMs). + """ + return pulumi.get(self, "slow_op_threshold_ms") + + @property + @pulumi.getter(name="transactionLifetimeLimitSeconds") + def transaction_lifetime_limit_seconds(self) -> pulumi.Output[int]: + """ + Specifies the lifetime of multi-document transactions. Transactions that exceed this limit are considered expired and will be aborted by a periodic cleanup process. The cleanup process runs every `transactionLifetimeLimitSeconds/2 seconds` or at least once every 60 seconds. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.transactionLifetimeLimitSeconds). + """ + return pulumi.get(self, "transaction_lifetime_limit_seconds") + + @property + @pulumi.getter + def verbosity(self) -> pulumi.Output[int]: + """ + The log message verbosity level. The verbosity level determines the amount of Informational and Debug messages MongoDB outputs. 0 includes informational messages while 1...5 increases the level to include debug messages. Changing this parameter will lead to a restart of the MongoDB service. Learn more [here](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-systemLog.verbosity). + """ + return pulumi.get(self, "verbosity") + diff --git a/sdk/python/pulumi_digitalocean/outputs.py b/sdk/python/pulumi_digitalocean/outputs.py index 0a246b1d..bad84252 100644 --- a/sdk/python/pulumi_digitalocean/outputs.py +++ b/sdk/python/pulumi_digitalocean/outputs.py @@ -107,6 +107,7 @@ 'DatabasePostgresqlConfigTimescaledb', 'DatabaseUserSetting', 'DatabaseUserSettingAcl', + 'DatabaseUserSettingOpensearchAcl', 'FirewallInboundRule', 'FirewallOutboundRule', 'FirewallPendingChange', @@ -6549,8 +6550,26 @@ def max_background_workers(self) -> Optional[int]: @pulumi.output_type class DatabaseUserSetting(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "opensearchAcls": + suggest = "opensearch_acls" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in DatabaseUserSetting. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + DatabaseUserSetting.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + DatabaseUserSetting.__key_warning(key) + return super().get(key, default) + def __init__(__self__, *, - acls: Optional[Sequence['outputs.DatabaseUserSettingAcl']] = None): + acls: Optional[Sequence['outputs.DatabaseUserSettingAcl']] = None, + opensearch_acls: Optional[Sequence['outputs.DatabaseUserSettingOpensearchAcl']] = None): """ :param Sequence['DatabaseUserSettingAclArgs'] acls: A set of ACLs (Access Control Lists) specifying permission on topics with a Kafka cluster. 
The properties of an individual ACL are described below: @@ -6558,6 +6577,8 @@ def __init__(__self__, *, """ if acls is not None: pulumi.set(__self__, "acls", acls) + if opensearch_acls is not None: + pulumi.set(__self__, "opensearch_acls", opensearch_acls) @property @pulumi.getter @@ -6569,6 +6590,11 @@ def acls(self) -> Optional[Sequence['outputs.DatabaseUserSettingAcl']]: """ return pulumi.get(self, "acls") + @property + @pulumi.getter(name="opensearchAcls") + def opensearch_acls(self) -> Optional[Sequence['outputs.DatabaseUserSettingOpensearchAcl']]: + return pulumi.get(self, "opensearch_acls") + @pulumi.output_type class DatabaseUserSettingAcl(dict): @@ -6611,6 +6637,31 @@ def id(self) -> Optional[str]: return pulumi.get(self, "id") +@pulumi.output_type +class DatabaseUserSettingOpensearchAcl(dict): + def __init__(__self__, *, + index: str, + permission: str): + """ + :param str permission: The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + """ + pulumi.set(__self__, "index", index) + pulumi.set(__self__, "permission", permission) + + @property + @pulumi.getter + def index(self) -> str: + return pulumi.get(self, "index") + + @property + @pulumi.getter + def permission(self) -> str: + """ + The permission level applied to the ACL. This includes "admin", "consume", "produce", and "produceconsume". "admin" allows for producing and consuming as well as add/delete/update permission for topics. "consume" allows only for reading topic messages. "produce" allows only for writing topic messages. "produceconsume" allows for both reading and writing topic messages. + """ + return pulumi.get(self, "permission") + + @pulumi.output_type class FirewallInboundRule(dict): @staticmethod diff --git a/upstream b/upstream index 9e37c59d..d0852b5a 160000 --- a/upstream +++ b/upstream @@ -1 +1 @@ -Subproject commit 9e37c59d7ca959ad48f517a70e7568a487f04d1f +Subproject commit d0852b5accc7a19f56002434b52fe59652512e5e
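
Reviewer note: the new `opensearch_acls` field on `DatabaseUserSetting` is the one addition in this diff that ships without a usage example. Below is a minimal, hypothetical sketch of how it might be set from the Python SDK. It assumes that matching input types (`DatabaseUserSettingArgs` and `DatabaseUserSettingOpensearchAclArgs`) are generated in `_inputs.py` alongside the output types shown above, that `DatabaseUser` accepts a `settings` list as it does for the existing Kafka ACLs, and that the cluster engine/version values are placeholders; none of this is asserted by the diff itself.

```python
import pulumi
import pulumi_digitalocean as digitalocean

# Sketch only: the input type names and the engine/version values below are
# assumptions, mirroring the DatabaseUserSettingOpensearchAcl output type
# added in this diff rather than anything the diff itself guarantees.
example_cluster = digitalocean.DatabaseCluster("example",
    name="example-opensearch-cluster",
    engine="opensearch",                      # placeholder engine/version
    version="2",
    size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB,
    region=digitalocean.Region.NYC3,
    node_count=1)

example_user = digitalocean.DatabaseUser("example",
    cluster_id=example_cluster.id,
    name="example-user",
    settings=[digitalocean.DatabaseUserSettingArgs(
        opensearch_acls=[digitalocean.DatabaseUserSettingOpensearchAclArgs(
            index="logs-*",       # index pattern the ACL applies to
            permission="admin",   # one of the permission levels listed in the schema description
        )],
    )])
```

If the generated `_inputs.py` follows the same shape as `outputs.py`, both `index` and `permission` would be required on each OpenSearch ACL entry, matching the `required` list added to the schema.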