confluentcloud.TableflowTopic

Confluent v2.22.0 published on Friday, Mar 28, 2025 by Pulumi
    Example Usage

    Option #1: Manage multiple Tableflow Topics in the same Pulumi Stack

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const example = new confluentcloud.TableflowTopic("example", {
        managedStorages: [{}],
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: stagingConfluentKafkaCluster.id,
        },
        displayName: orders.topicName,
        tableFormats: [
            "ICEBERG",
            "DELTA",
        ],
        credentials: {
            key: env_admin_tableflow_api_key.id,
            secret: env_admin_tableflow_api_key.secret,
        },
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    example = confluentcloud.TableflowTopic("example",
        managed_storages=[{}],
        environment={
            "id": staging["id"],
        },
        kafka_cluster={
            "id": staging_confluent_kafka_cluster["id"],
        },
        display_name=orders["topicName"],
        table_formats=[
            "ICEBERG",
            "DELTA",
        ],
        credentials={
            "key": env_admin_tableflow_api_key["id"],
            "secret": env_admin_tableflow_api_key["secret"],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := confluentcloud.NewTableflowTopic(ctx, "example", &confluentcloud.TableflowTopicArgs{
    			ManagedStorages: confluentcloud.TableflowTopicManagedStorageArray{
    				&confluentcloud.TableflowTopicManagedStorageArgs{},
    			},
    			Environment: &confluentcloud.TableflowTopicEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			KafkaCluster: &confluentcloud.TableflowTopicKafkaClusterArgs{
    				Id: pulumi.Any(stagingConfluentKafkaCluster.Id),
    			},
    			DisplayName: pulumi.Any(orders.TopicName),
    			TableFormats: pulumi.StringArray{
    				pulumi.String("ICEBERG"),
    				pulumi.String("DELTA"),
    			},
    			Credentials: &confluentcloud.TableflowTopicCredentialsArgs{
    				Key:    pulumi.Any(env_admin_tableflow_api_key.Id),
    				Secret: pulumi.Any(env_admin_tableflow_api_key.Secret),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new ConfluentCloud.TableflowTopic("example", new()
        {
            ManagedStorages = new[]
            {
            new ConfluentCloud.Inputs.TableflowTopicManagedStorageArgs(),
            },
            Environment = new ConfluentCloud.Inputs.TableflowTopicEnvironmentArgs
            {
                Id = staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.TableflowTopicKafkaClusterArgs
            {
                Id = stagingConfluentKafkaCluster.Id,
            },
            DisplayName = orders.TopicName,
            TableFormats = new[]
            {
                "ICEBERG",
                "DELTA",
            },
            Credentials = new ConfluentCloud.Inputs.TableflowTopicCredentialsArgs
            {
                Key = env_admin_tableflow_api_key.Id,
                Secret = env_admin_tableflow_api_key.Secret,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.TableflowTopic;
    import com.pulumi.confluentcloud.TableflowTopicArgs;
    import com.pulumi.confluentcloud.inputs.TableflowTopicManagedStorageArgs;
    import com.pulumi.confluentcloud.inputs.TableflowTopicEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.TableflowTopicKafkaClusterArgs;
    import com.pulumi.confluentcloud.inputs.TableflowTopicCredentialsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new TableflowTopic("example", TableflowTopicArgs.builder()
            .managedStorages(TableflowTopicManagedStorageArgs.builder().build())
                .environment(TableflowTopicEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .kafkaCluster(TableflowTopicKafkaClusterArgs.builder()
                    .id(stagingConfluentKafkaCluster.id())
                    .build())
                .displayName(orders.topicName())
            .tableFormats(
                "ICEBERG",
                "DELTA")
                .credentials(TableflowTopicCredentialsArgs.builder()
                    .key(env_admin_tableflow_api_key.id())
                    .secret(env_admin_tableflow_api_key.secret())
                    .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: confluentcloud:TableflowTopic
        properties:
          managedStorages:
            - {}
          environment:
            id: ${staging.id}
          kafkaCluster:
            id: ${stagingConfluentKafkaCluster.id}
          displayName: ${orders.topicName}
          tableFormats:
            - ICEBERG
            - DELTA
          credentials:
            key: ${["env-admin-tableflow-api-key"].id}
            secret: ${["env-admin-tableflow-api-key"].secret}
    

    Option #2: Manage a single Tableflow Topic in the same Pulumi Stack

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const example = new confluentcloud.TableflowTopic("example", {
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: stagingConfluentKafkaCluster.id,
        },
        displayName: orders.topicName,
        byobAws: {
            bucketName: "bucket_1",
            providerIntegrationId: main.id,
        },
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    example = confluentcloud.TableflowTopic("example",
        environment={
            "id": staging["id"],
        },
        kafka_cluster={
            "id": staging_confluent_kafka_cluster["id"],
        },
        display_name=orders["topicName"],
        byob_aws={
            "bucket_name": "bucket_1",
            "provider_integration_id": main["id"],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := confluentcloud.NewTableflowTopic(ctx, "example", &confluentcloud.TableflowTopicArgs{
    			Environment: &confluentcloud.TableflowTopicEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			KafkaCluster: &confluentcloud.TableflowTopicKafkaClusterArgs{
    				Id: pulumi.Any(stagingConfluentKafkaCluster.Id),
    			},
    			DisplayName: pulumi.Any(orders.TopicName),
    			ByobAws: &confluentcloud.TableflowTopicByobAwsArgs{
    				BucketName:            pulumi.String("bucket_1"),
    				ProviderIntegrationId: pulumi.Any(main.Id),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new ConfluentCloud.TableflowTopic("example", new()
        {
            Environment = new ConfluentCloud.Inputs.TableflowTopicEnvironmentArgs
            {
                Id = staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.TableflowTopicKafkaClusterArgs
            {
                Id = stagingConfluentKafkaCluster.Id,
            },
            DisplayName = orders.TopicName,
            ByobAws = new ConfluentCloud.Inputs.TableflowTopicByobAwsArgs
            {
                BucketName = "bucket_1",
                ProviderIntegrationId = main.Id,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.TableflowTopic;
    import com.pulumi.confluentcloud.TableflowTopicArgs;
    import com.pulumi.confluentcloud.inputs.TableflowTopicEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.TableflowTopicKafkaClusterArgs;
    import com.pulumi.confluentcloud.inputs.TableflowTopicByobAwsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new TableflowTopic("example", TableflowTopicArgs.builder()
                .environment(TableflowTopicEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .kafkaCluster(TableflowTopicKafkaClusterArgs.builder()
                    .id(stagingConfluentKafkaCluster.id())
                    .build())
                .displayName(orders.topicName())
                .byobAws(TableflowTopicByobAwsArgs.builder()
                    .bucketName("bucket_1")
                    .providerIntegrationId(main.id())
                    .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: confluentcloud:TableflowTopic
        properties:
          environment:
            id: ${staging.id}
          kafkaCluster:
            id: ${stagingConfluentKafkaCluster.id}
          displayName: ${orders.topicName}
          byobAws:
            bucketName: bucket_1
            providerIntegrationId: ${main.id}
    

    Getting Started

    The following end-to-end examples might help you get started with the confluentcloud.TableflowTopic resource:

    • confluent-managed-storage: Tableflow topic with Confluent-managed storage.
    • byob-aws-storage: Tableflow topic with custom (BYOB AWS) storage.

    Create TableflowTopic Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new TableflowTopic(name: string, args: TableflowTopicArgs, opts?: CustomResourceOptions);
    @overload
    def TableflowTopic(resource_name: str,
                       args: TableflowTopicArgs,
                       opts: Optional[ResourceOptions] = None)
    
    @overload
    def TableflowTopic(resource_name: str,
                       opts: Optional[ResourceOptions] = None,
                       display_name: Optional[str] = None,
                       environment: Optional[TableflowTopicEnvironmentArgs] = None,
                       kafka_cluster: Optional[TableflowTopicKafkaClusterArgs] = None,
                       byob_aws: Optional[TableflowTopicByobAwsArgs] = None,
                       credentials: Optional[TableflowTopicCredentialsArgs] = None,
                       managed_storages: Optional[Sequence[TableflowTopicManagedStorageArgs]] = None,
                       record_failure_strategy: Optional[str] = None,
                       retention_ms: Optional[str] = None,
                       table_formats: Optional[Sequence[str]] = None)
    func NewTableflowTopic(ctx *Context, name string, args TableflowTopicArgs, opts ...ResourceOption) (*TableflowTopic, error)
    public TableflowTopic(string name, TableflowTopicArgs args, CustomResourceOptions? opts = null)
    public TableflowTopic(String name, TableflowTopicArgs args)
    public TableflowTopic(String name, TableflowTopicArgs args, CustomResourceOptions options)
    
    type: confluentcloud:TableflowTopic
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args TableflowTopicArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args TableflowTopicArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args TableflowTopicArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args TableflowTopicArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args TableflowTopicArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var tableflowTopicResource = new ConfluentCloud.TableflowTopic("tableflowTopicResource", new()
    {
        DisplayName = "string",
        Environment = new ConfluentCloud.Inputs.TableflowTopicEnvironmentArgs
        {
            Id = "string",
        },
        KafkaCluster = new ConfluentCloud.Inputs.TableflowTopicKafkaClusterArgs
        {
            Id = "string",
        },
        ByobAws = new ConfluentCloud.Inputs.TableflowTopicByobAwsArgs
        {
            BucketName = "string",
            ProviderIntegrationId = "string",
            BucketRegion = "string",
        },
        Credentials = new ConfluentCloud.Inputs.TableflowTopicCredentialsArgs
        {
            Key = "string",
            Secret = "string",
        },
        ManagedStorages = new[]
        {
            new ConfluentCloud.Inputs.TableflowTopicManagedStorageArgs(),
        },
        RecordFailureStrategy = "string",
        RetentionMs = "string",
        TableFormats = new[]
        {
            "string",
        },
    });
    
    example, err := confluentcloud.NewTableflowTopic(ctx, "tableflowTopicResource", &confluentcloud.TableflowTopicArgs{
    	DisplayName: pulumi.String("string"),
    	Environment: &confluentcloud.TableflowTopicEnvironmentArgs{
    		Id: pulumi.String("string"),
    	},
    	KafkaCluster: &confluentcloud.TableflowTopicKafkaClusterArgs{
    		Id: pulumi.String("string"),
    	},
    	ByobAws: &confluentcloud.TableflowTopicByobAwsArgs{
    		BucketName:            pulumi.String("string"),
    		ProviderIntegrationId: pulumi.String("string"),
    		BucketRegion:          pulumi.String("string"),
    	},
    	Credentials: &confluentcloud.TableflowTopicCredentialsArgs{
    		Key:    pulumi.String("string"),
    		Secret: pulumi.String("string"),
    	},
    	ManagedStorages: confluentcloud.TableflowTopicManagedStorageArray{
    		&confluentcloud.TableflowTopicManagedStorageArgs{},
    	},
    	RecordFailureStrategy: pulumi.String("string"),
    	RetentionMs:           pulumi.String("string"),
    	TableFormats: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    })
    
    var tableflowTopicResource = new TableflowTopic("tableflowTopicResource", TableflowTopicArgs.builder()
        .displayName("string")
        .environment(TableflowTopicEnvironmentArgs.builder()
            .id("string")
            .build())
        .kafkaCluster(TableflowTopicKafkaClusterArgs.builder()
            .id("string")
            .build())
        .byobAws(TableflowTopicByobAwsArgs.builder()
            .bucketName("string")
            .providerIntegrationId("string")
            .bucketRegion("string")
            .build())
        .credentials(TableflowTopicCredentialsArgs.builder()
            .key("string")
            .secret("string")
            .build())
        .managedStorages(TableflowTopicManagedStorageArgs.builder().build())
        .recordFailureStrategy("string")
        .retentionMs("string")
        .tableFormats("string")
        .build());
    
    tableflow_topic_resource = confluentcloud.TableflowTopic("tableflowTopicResource",
        display_name="string",
        environment={
            "id": "string",
        },
        kafka_cluster={
            "id": "string",
        },
        byob_aws={
            "bucket_name": "string",
            "provider_integration_id": "string",
            "bucket_region": "string",
        },
        credentials={
            "key": "string",
            "secret": "string",
        },
        managed_storages=[{}],
        record_failure_strategy="string",
        retention_ms="string",
        table_formats=["string"])
    
    const tableflowTopicResource = new confluentcloud.TableflowTopic("tableflowTopicResource", {
        displayName: "string",
        environment: {
            id: "string",
        },
        kafkaCluster: {
            id: "string",
        },
        byobAws: {
            bucketName: "string",
            providerIntegrationId: "string",
            bucketRegion: "string",
        },
        credentials: {
            key: "string",
            secret: "string",
        },
        managedStorages: [{}],
        recordFailureStrategy: "string",
        retentionMs: "string",
        tableFormats: ["string"],
    });
    
    type: confluentcloud:TableflowTopic
    properties:
        byobAws:
            bucketName: string
            bucketRegion: string
            providerIntegrationId: string
        credentials:
            key: string
            secret: string
        displayName: string
        environment:
            id: string
        kafkaCluster:
            id: string
        managedStorages:
            - {}
        recordFailureStrategy: string
        retentionMs: string
        tableFormats:
            - string
    

    TableflowTopic Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
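
    For example, a nested environment input can be written either way; a minimal sketch (the env-abc123 ID is a placeholder):

    import pulumi_confluentcloud as confluentcloud

    # As a typed argument class ...
    environment = confluentcloud.TableflowTopicEnvironmentArgs(id="env-abc123")
    # ... or as a plain dictionary literal with the same keys.
    environment = {"id": "env-abc123"}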

    The TableflowTopic resource accepts the following input properties:

    DisplayName string
    The name of the Kafka topic for which Tableflow is enabled.
    Environment Pulumi.ConfluentCloud.Inputs.TableflowTopicEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster Pulumi.ConfluentCloud.Inputs.TableflowTopicKafkaCluster
    ByobAws Pulumi.ConfluentCloud.Inputs.TableflowTopicByobAws
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    Credentials Pulumi.ConfluentCloud.Inputs.TableflowTopicCredentials
    The Cluster API Credentials.
    ManagedStorages List<Pulumi.ConfluentCloud.Inputs.TableflowTopicManagedStorage>
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    RecordFailureStrategy string
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    RetentionMs string
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    TableFormats List<string>
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    DisplayName string
    The name of the Kafka topic for which Tableflow is enabled.
    Environment TableflowTopicEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster TableflowTopicKafkaClusterArgs
    ByobAws TableflowTopicByobAwsArgs
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    Credentials TableflowTopicCredentialsArgs
    The Cluster API Credentials.
    ManagedStorages []TableflowTopicManagedStorageArgs
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    RecordFailureStrategy string
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    RetentionMs string
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    TableFormats []string
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    displayName String
    The name of the Kafka topic for which Tableflow is enabled.
    environment TableflowTopicEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster TableflowTopicKafkaCluster
    byobAws TableflowTopicByobAws
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    credentials TableflowTopicCredentials
    The Cluster API Credentials.
    managedStorages List<TableflowTopicManagedStorage>
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    recordFailureStrategy String
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    retentionMs String
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    tableFormats List<String>
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    displayName string
    The name of the Kafka topic for which Tableflow is enabled.
    environment TableflowTopicEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster TableflowTopicKafkaCluster
    byobAws TableflowTopicByobAws
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    credentials TableflowTopicCredentials
    The Cluster API Credentials.
    managedStorages TableflowTopicManagedStorage[]
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    recordFailureStrategy string
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    retentionMs string
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    tableFormats string[]
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    display_name str
    The name of the Kafka topic for which Tableflow is enabled.
    environment TableflowTopicEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafka_cluster TableflowTopicKafkaClusterArgs
    byob_aws TableflowTopicByobAwsArgs
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    credentials TableflowTopicCredentialsArgs
    The Cluster API Credentials.
    managed_storages Sequence[TableflowTopicManagedStorageArgs]
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    record_failure_strategy str
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    retention_ms str
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    table_formats Sequence[str]
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    displayName String
    The name of the Kafka topic for which Tableflow is enabled.
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster Property Map
    byobAws Property Map
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    credentials Property Map
    The Cluster API Credentials.
    managedStorages List<Property Map>
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    recordFailureStrategy String
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    retentionMs String
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    tableFormats List<String>
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
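
    To illustrate the optional inputs, here is a minimal Python sketch extending the managed-storage example from Example Usage (the staging, staging_confluent_kafka_cluster, orders, and env_admin_tableflow_api_key references are placeholders, as above):

    import pulumi_confluentcloud as confluentcloud

    example = confluentcloud.TableflowTopic("example",
        environment={"id": staging["id"]},
        kafka_cluster={"id": staging_confluent_kafka_cluster["id"]},
        display_name=orders["topicName"],
        managed_storages=[{}],
        credentials={
            "key": env_admin_tableflow_api_key["id"],
            "secret": env_admin_tableflow_api_key["secret"],
        },
        record_failure_strategy="SKIP",  # skip bad records and keep materializing
        retention_ms="604800000",        # expire snapshots/versions after 7 days
        table_formats=["ICEBERG"])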

    Outputs

    All input properties are implicitly available as output properties. Additionally, the TableflowTopic resource produces the following output properties:

    EnableCompaction bool
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    EnablePartitioning bool
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    Id string
    The provider-assigned unique ID for this managed resource.
    Suspended bool
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    EnableCompaction bool
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    EnablePartitioning bool
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    Id string
    The provider-assigned unique ID for this managed resource.
    Suspended bool
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    enableCompaction Boolean
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    enablePartitioning Boolean
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    id String
    The provider-assigned unique ID for this managed resource.
    suspended Boolean
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    enableCompaction boolean
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    enablePartitioning boolean
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    id string
    The provider-assigned unique ID for this managed resource.
    suspended boolean
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    enable_compaction bool
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    enable_partitioning bool
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    id str
    The provider-assigned unique ID for this managed resource.
    suspended bool
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    enableCompaction Boolean
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    enablePartitioning Boolean
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    id String
    The provider-assigned unique ID for this managed resource.
    suspended Boolean
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
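
    These computed properties can be exported as stack outputs; a minimal sketch, assuming the example resource created in Example Usage:

    import pulumi

    # Provider-computed outputs, resolved after the resource is created.
    pulumi.export("tableflow_suspended", example.suspended)
    pulumi.export("compaction_enabled", example.enable_compaction)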

    Look up Existing TableflowTopic Resource

    Get an existing TableflowTopic resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: TableflowTopicState, opts?: CustomResourceOptions): TableflowTopic
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            byob_aws: Optional[TableflowTopicByobAwsArgs] = None,
            credentials: Optional[TableflowTopicCredentialsArgs] = None,
            display_name: Optional[str] = None,
            enable_compaction: Optional[bool] = None,
            enable_partitioning: Optional[bool] = None,
            environment: Optional[TableflowTopicEnvironmentArgs] = None,
            kafka_cluster: Optional[TableflowTopicKafkaClusterArgs] = None,
            managed_storages: Optional[Sequence[TableflowTopicManagedStorageArgs]] = None,
            record_failure_strategy: Optional[str] = None,
            retention_ms: Optional[str] = None,
            suspended: Optional[bool] = None,
            table_formats: Optional[Sequence[str]] = None) -> TableflowTopic
    func GetTableflowTopic(ctx *Context, name string, id IDInput, state *TableflowTopicState, opts ...ResourceOption) (*TableflowTopic, error)
    public static TableflowTopic Get(string name, Input<string> id, TableflowTopicState? state, CustomResourceOptions? opts = null)
    public static TableflowTopic get(String name, Output<String> id, TableflowTopicState state, CustomResourceOptions options)
    resources:
      _:
        type: confluentcloud:TableflowTopic
        get:
          id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    ByobAws Pulumi.ConfluentCloud.Inputs.TableflowTopicByobAws
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    Credentials Pulumi.ConfluentCloud.Inputs.TableflowTopicCredentials
    The Cluster API Credentials.
    DisplayName string
    The name of the Kafka topic for which Tableflow is enabled.
    EnableCompaction bool
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    EnablePartitioning bool
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    Environment Pulumi.ConfluentCloud.Inputs.TableflowTopicEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster Pulumi.ConfluentCloud.Inputs.TableflowTopicKafkaCluster
    ManagedStorages List<Pulumi.ConfluentCloud.Inputs.TableflowTopicManagedStorage>
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    RecordFailureStrategy string
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    RetentionMs string
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    Suspended bool
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    TableFormats List<string>
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    ByobAws TableflowTopicByobAwsArgs
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    Credentials TableflowTopicCredentialsArgs
    The Cluster API Credentials.
    DisplayName string
    The name of the Kafka topic for which Tableflow is enabled.
    EnableCompaction bool
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    EnablePartitioning bool
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    Environment TableflowTopicEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster TableflowTopicKafkaClusterArgs
    ManagedStorages []TableflowTopicManagedStorageArgs
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    RecordFailureStrategy string
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    RetentionMs string
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    Suspended bool
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    TableFormats []string
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    byobAws TableflowTopicByobAws
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    credentials TableflowTopicCredentials
    The Cluster API Credentials.
    displayName String
    The name of the Kafka topic for which Tableflow is enabled.
    enableCompaction Boolean
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    enablePartitioning Boolean
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    environment TableflowTopicEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster TableflowTopicKafkaCluster
    managedStorages List<TableflowTopicManagedStorage>
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    recordFailureStrategy String
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    retentionMs String
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    suspended Boolean
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    tableFormats List<String>
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    byobAws TableflowTopicByobAws
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    credentials TableflowTopicCredentials
    The Cluster API Credentials.
    displayName string
    The name of the Kafka topic for which Tableflow is enabled.
    enableCompaction boolean
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    enablePartitioning boolean
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    environment TableflowTopicEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster TableflowTopicKafkaCluster
    managedStorages TableflowTopicManagedStorage[]
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    recordFailureStrategy string
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    retentionMs string
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    suspended boolean
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    tableFormats string[]
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    byob_aws TableflowTopicByobAwsArgs
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    credentials TableflowTopicCredentialsArgs
    The Cluster API Credentials.
    display_name str
    The name of the Kafka topic for which Tableflow is enabled.
    enable_compaction bool
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    enable_partitioning bool
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    environment TableflowTopicEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafka_cluster TableflowTopicKafkaClusterArgs
    managed_storages Sequence[TableflowTopicManagedStorageArgs]
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    record_failure_strategy str
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    retention_ms str
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    suspended bool
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    table_formats Sequence[str]
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
    byobAws Property Map
    The configuration of the custom storage (BYOB AWS). See Quick Start with Custom Storage for more details.
    credentials Property Map
    The Cluster API Credentials.
    displayName String
    The name of the Kafka topic for which Tableflow is enabled.
    enableCompaction Boolean
    (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
    enablePartitioning Boolean
    (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster Property Map
    managedStorages List<Property Map>
    The configuration of the Confluent managed storage. See Quick Start with Managed Storage for more details.
    recordFailureStrategy String
    The strategy for handling record failures in the Tableflow-enabled topic during materialization. Accepted values are SKIP and SUSPEND: SKIP skips the bad record and moves on to the next one, while SUSPEND suspends materialization of the topic.
    retentionMs String
    The maximum age, in milliseconds, of snapshots (Iceberg) or versions (Delta) to retain on the table (snapshot/version expiration) for the Tableflow-enabled topic.
    suspended Boolean
    (Optional Boolean) Indicates whether the Tableflow should be suspended.
    tableFormats List<String>
    The supported table formats for the Tableflow-enabled topic. Accepted values are DELTA, ICEBERG.
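
    Putting the lookup together, a minimal Python sketch (the composite ID follows the <Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name> format described under Import below):

    import pulumi_confluentcloud as confluentcloud

    # Adopt the state of an existing Tableflow Topic into the program.
    existing = confluentcloud.TableflowTopic.get("existing",
        "env-abc123/lkc-abc123/orders")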

    Supporting Types

    TableflowTopicByobAws, TableflowTopicByobAwsArgs

    BucketName string
    The bucket name.
    ProviderIntegrationId string
    The provider integration id.
    BucketRegion string
    (Required String) The bucket region.
    BucketName string
    The bucket name.
    ProviderIntegrationId string
    The provider integration id.
    BucketRegion string
    (Required String) The bucket region.
    bucketName String
    The bucket name.
    providerIntegrationId String
    The provider integration id.
    bucketRegion String
    (Required String) The bucket region.
    bucketName string
    The bucket name.
    providerIntegrationId string
    The provider integration id.
    bucketRegion string
    (Required String) The bucket region.
    bucket_name str
    The bucket name.
    provider_integration_id str
    The provider integration id.
    bucket_region str
    (Required String) The bucket region.
    bucketName String
    The bucket name.
    providerIntegrationId String
    The provider integration id.
    bucketRegion String
    (Required String) The bucket region.

    TableflowTopicCredentials, TableflowTopicCredentialsArgs

    Key string
    The Tableflow API Key.
    Secret string
    The Cluster API Secret for your Confluent Cloud cluster.
    Key string
    The Tableflow API Key.
    Secret string
    The Cluster API Secret for your Confluent Cloud cluster.
    key String
    The Tableflow API Key.
    secret String
    The Cluster API Secret for your Confluent Cloud cluster.
    key string
    The Tableflow API Key.
    secret string
    The Cluster API Secret for your Confluent Cloud cluster.
    key str
    The Tableflow API Key.
    secret str
    The Cluster API Secret for your Confluent Cloud cluster.
    key String
    The Tableflow API Key.
    secret String
    The Cluster API Secret for your Confluent Cloud cluster.
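
    Rather than hard-coding these values, they can be read from stack configuration as secrets; a minimal sketch (the tableflowApiKey and tableflowApiSecret config keys are hypothetical names):

    import pulumi

    config = pulumi.Config()
    # require_secret keeps the values encrypted in state and masked in CLI output.
    credentials = {
        "key": config.require_secret("tableflowApiKey"),
        "secret": config.require_secret("tableflowApiSecret"),
    }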

    TableflowTopicEnvironment, TableflowTopicEnvironmentArgs

    Id string
    The ID of the Environment, for example, env-abc123.
    Id string
    The ID of the Environment, for example, env-abc123.
    id String
    The ID of the Environment, for example, env-abc123.
    id string
    The ID of the Environment, for example, env-abc123.
    id str
    The ID of the Environment, for example, env-abc123.
    id String
    The ID of the Environment, for example, env-abc123.

    TableflowTopicKafkaCluster, TableflowTopicKafkaClusterArgs

    Id string
    The ID of the Kafka cluster, for example, lkc-abc123.
    Id string
    The ID of the Kafka cluster, for example, lkc-abc123.
    id String
    The ID of the Kafka cluster, for example, lkc-abc123.
    id string
    The ID of the Kafka cluster, for example, lkc-abc123.
    id str
    The ID of the Kafka cluster, for example, lkc-abc123.
    id String
    The ID of the Kafka cluster, for example, lkc-abc123.

    Import

    You can import a Tableflow Topic by using the Tableflow Topic name, Environment ID, and Kafka Cluster ID, in the format <Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>, for example:

    Option #1: Manage multiple Tableflow Topics in the same Pulumi Stack

    $ export IMPORT_TABLEFLOW_API_KEY="<tableflow_api_key>"

    $ export IMPORT_TABLEFLOW_API_SECRET="<tableflow_api_secret>"

    $ pulumi import confluentcloud:index/tableflowTopic:TableflowTopic example env-abc123/lkc-abc123/orders
    

    Option #2: Manage a single Tableflow Topic in the same Pulumi Stack

    $ pulumi import confluentcloud:index/tableflowTopic:TableflowTopic example env-abc123/lkc-abc123/orders
    

    !> Warning: Do not forget to delete your terminal command history afterwards for security purposes.

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Confluent Cloud pulumi/pulumi-confluentcloud
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the confluent Terraform Provider.