---
# generated by https://github.com/hashicorp/terraform-plugin-docs
page_title: "clickhouse_clickpipe Resource - clickhouse"
subcategory: ""
description: |-
  This experimental resource allows you to create and manage ClickPipes data ingestion in ClickHouse Cloud.
  The feature needs to be enabled on your account. Please contact ClickHouse Cloud support for more information.
  The resource is in early access and may change in future releases. Feature coverage might not include all ClickPipe capabilities.
  Known limitations:
  ClickPipe does not support table updates for managed tables. If you need to update the table schema, you will have to do that externally.
---

# clickhouse_clickpipe (Resource)

This experimental resource allows you to create and manage ClickPipes data ingestion in ClickHouse Cloud.

The feature needs to be enabled on your account. Please contact ClickHouse Cloud support for more information.

**The resource is in early access and may change in future releases. Feature coverage might not include all ClickPipe capabilities.**

Known limitations:
- ClickPipe does not support table updates for managed tables. If you need to update the table schema, you will have to do that externally.

## Example Usage

```terraform
resource "clickhouse_clickpipe" "kafka_clickpipe" {
  name        = "My Kafka ClickPipe"
  description = "Data pipeline from Kafka to ClickHouse"

  service_id = "e9465b4b-f7e5-4937-8e21-8d508b02843d"

  scaling {
    replicas = 1
  }

  state = "Running"

  source {
    kafka {
      type    = "confluent"
      format  = "JSONEachRow"
      brokers = "my-kafka-broker:9092"
      topics  = "my_topic"

      consumer_group = "clickpipe-test"

      credentials {
        username = "user"
        password = "***"
      }
    }
  }

  destination {
    table         = "my_table"
    managed_table = true

    table_definition {
      engine {
        type = "MergeTree"
      }
    }

    columns {
      name = "my_field1"
      type = "String"
    }

    columns {
      name = "my_field2"
      type = "UInt64"
    }
  }

  field_mappings = [
    {
      source_field      = "my_field"
      destination_field = "my_field1"
    }
  ]
}
```
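
For comparison, here is a minimal sketch of an object storage (S3) ClickPipe assembled from the attributes documented in the schema below; the bucket URL, credentials, table, and column names are illustrative placeholders, and the block syntax mirrors the Kafka example above.

```terraform
resource "clickhouse_clickpipe" "s3_clickpipe" {
  name       = "My S3 ClickPipe"
  service_id = "e9465b4b-f7e5-4937-8e21-8d508b02843d"

  source {
    object_storage {
      # "s3" is the default source type; "gcs" is also accepted.
      type   = "s3"
      format = "JSONEachRow"
      # Placeholder bucket path; bash-like wildcards select multiple objects.
      url    = "https://my-bucket.s3.eu-west-1.amazonaws.com/data/*.json"

      authentication = "IAM_USER"
      access_key {
        access_key_id = "my-access-key-id" # placeholder
        secret_key    = "***"
      }

      # Keep ingesting new files as they are uploaded.
      is_continuous = true
    }
  }

  destination {
    table         = "my_table"
    managed_table = true

    table_definition {
      engine {
        type = "MergeTree"
      }
    }

    columns {
      name = "my_field1"
      type = "String"
    }
  }
}
```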

<!-- schema generated by tfplugindocs -->
## Schema

### Required

- `destination` (Attributes) The destination for the ClickPipe. (see [below for nested schema](#nestedatt--destination))
- `name` (String) The name of the ClickPipe.
- `service_id` (String) The ID of the service to which the ClickPipe belongs.
- `source` (Attributes) The data source for the ClickPipe. At least one source configuration must be provided. (see [below for nested schema](#nestedatt--source))

### Optional

- `description` (String) The description of the ClickPipe.
- `field_mappings` (Attributes List) Field mappings between the source and the destination table. (see [below for nested schema](#nestedatt--field_mappings))
- `scaling` (Attributes) (see [below for nested schema](#nestedatt--scaling))
- `state` (String) The desired state of the ClickPipe. (`Running`, `Stopped`). Default is `Running`.

### Read-Only

- `id` (String) The ID of the ClickPipe. Generated by ClickHouse Cloud.

<a id="nestedatt--destination"></a>
### Nested Schema for `destination`

Required:

- `columns` (Attributes List) The list of columns for the ClickHouse table. (see [below for nested schema](#nestedatt--destination--columns))
- `table` (String) The name of the ClickHouse table.

Optional:

- `database` (String) The name of the ClickHouse database. Default is `default`.
- `managed_table` (Boolean) Whether the table is managed by ClickHouse Cloud. If `false`, the table must exist in the database. Default is `true`.
- `roles` (List of String) ClickPipe will create a ClickHouse user with these roles. Add your custom roles here if required.
- `table_definition` (Attributes) Definition of the destination table. Required for ClickPipes managed tables. (see [below for nested schema](#nestedatt--destination--table_definition))

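As a hedged illustration of these attributes, the snippet below (placed inside the resource block) writes into a pre-existing table instead of a managed one; the database, table, role, and column names are placeholders.

```terraform
  destination {
    database      = "analytics" # placeholder database name
    table         = "events"    # must already exist when managed_table = false
    managed_table = false

    # Extra role granted to the ClickHouse user created for this pipe (placeholder name).
    roles = ["my_custom_role"]

    columns {
      name = "event_id"
      type = "String"
    }
  }
```
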
<a id="nestedatt--destination--columns"></a>
### Nested Schema for `destination.columns`

Required:

- `name` (String) The name of the column.
- `type` (String) The type of the column.


<a id="nestedatt--destination--table_definition"></a>
### Nested Schema for `destination.table_definition`

Required:

- `engine` (Attributes) The engine of the ClickHouse table. (see [below for nested schema](#nestedatt--destination--table_definition--engine))

Optional:

- `partition_by` (String) The column to partition the table by.
- `primary_key` (String) The primary key of the table.
- `sorting_key` (List of String) The list of columns for the sorting key.

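For managed tables, a fuller table definition might look like the hedged sketch below; the column names are placeholders and are assumed to appear in the destination's `columns` list.

```terraform
  table_definition {
    engine {
      type = "MergeTree"
    }

    sorting_key  = ["timestamp", "event_id"] # placeholder columns
    partition_by = "timestamp"
    primary_key  = "event_id"
  }
```
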
<a id="nestedatt--destination--table_definition--engine"></a>
### Nested Schema for `destination.table_definition.engine`

Required:

- `type` (String) The type of the engine. Only `MergeTree` is supported.



<a id="nestedatt--source"></a>
### Nested Schema for `source`

Optional:

- `kafka` (Attributes) The Kafka source configuration for the ClickPipe. (see [below for nested schema](#nestedatt--source--kafka))
- `object_storage` (Attributes) The object storage (S3-compatible) source configuration for the ClickPipe. (see [below for nested schema](#nestedatt--source--object_storage))

<a id="nestedatt--source--kafka"></a>
### Nested Schema for `source.kafka`

Required:

- `brokers` (String) The list of Kafka bootstrap brokers. (comma separated)
- `format` (String) The format of the Kafka source. (`JSONEachRow`, `Avro`, `AvroConfluent`)
- `topics` (String) The list of Kafka topics. (comma separated)

Optional:

- `authentication` (String) The authentication method for the Kafka source. (`PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`, `IAM_ROLE`, `IAM_USER`). Default is `PLAIN`.
- `ca_certificate` (String) PEM encoded CA certificates to validate the broker's certificate.
- `consumer_group` (String) Consumer group of the Kafka source. If not provided, `clickpipes-<ID>` will be used.
- `credentials` (Attributes) The credentials for the Kafka source. (see [below for nested schema](#nestedatt--source--kafka--credentials))
- `iam_role` (String) The IAM role for the Kafka source. Use with `IAM_ROLE` authentication. It can only be used with ClickHouse services deployed on AWS. Read more on the [ClickPipes documentation page](https://clickhouse.com/docs/en/integrations/clickpipes/kafka#iam)
- `offset` (Attributes) The Kafka offset. (see [below for nested schema](#nestedatt--source--kafka--offset))
- `schema_registry` (Attributes) The schema registry for the Kafka source. (see [below for nested schema](#nestedatt--source--kafka--schema_registry))
- `type` (String) The type of the Kafka source. (`kafka`, `redpanda`, `confluent`, `msk`, `warpstream`, `azureeventhub`). Default is `kafka`.

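As a hedged sketch, an Amazon MSK source authenticated with an IAM role (placed inside the resource block) could look like the following; the broker address and role ARN are placeholders.

```terraform
  source {
    kafka {
      type    = "msk"
      format  = "JSONEachRow"
      brokers = "b-1.my-msk-cluster.kafka.eu-west-1.amazonaws.com:9098" # placeholder broker
      topics  = "my_topic"

      authentication = "IAM_ROLE"
      iam_role       = "arn:aws:iam::123456789012:role/my-clickpipes-role" # placeholder ARN
    }
  }
```
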
<a id="nestedatt--source--kafka--credentials"></a>
### Nested Schema for `source.kafka.credentials`

Optional:

- `access_key_id` (String, Sensitive) The access key ID for the Kafka source. Use with `IAM_USER` authentication.
- `connection_string` (String, Sensitive) The connection string for the Kafka source. Use with the `azureeventhub` Kafka source type and `PLAIN` authentication.
- `password` (String, Sensitive) The password for the Kafka source.
- `secret_key` (String, Sensitive) The secret key for the Kafka source. Use with `IAM_USER` authentication.
- `username` (String, Sensitive) The username for the Kafka source.

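For an Azure Event Hubs source (`type = "azureeventhub"` with `PLAIN` authentication), credentials are supplied as a connection string; a hedged sketch with a placeholder value:

```terraform
  credentials {
    # Placeholder Event Hubs connection string.
    connection_string = "Endpoint=sb://my-namespace.servicebus.windows.net/;SharedAccessKeyName=my-policy;SharedAccessKey=***"
  }
```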

<a id="nestedatt--source--kafka--offset"></a>
### Nested Schema for `source.kafka.offset`

Required:

- `strategy` (String) The offset strategy for the Kafka source. (`from_beginning`, `from_latest`, `from_timestamp`)

Optional:

- `timestamp` (String) The timestamp for the Kafka offset. Use with `from_timestamp` offset strategy. (format `2021-01-01T00:00`)

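A hedged sketch of resuming consumption from a specific point in time, using the timestamp format shown above:

```terraform
  offset {
    strategy  = "from_timestamp"
    timestamp = "2021-01-01T00:00"
  }
```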

<a id="nestedatt--source--kafka--schema_registry"></a>
### Nested Schema for `source.kafka.schema_registry`

Required:

- `authentication` (String) The authentication method for the Schema Registry. Only `PLAIN` is supported.
- `credentials` (Attributes) The credentials for the Schema Registry. (see [below for nested schema](#nestedatt--source--kafka--schema_registry--credentials))
- `url` (String) The URL of the schema registry.

<a id="nestedatt--source--kafka--schema_registry--credentials"></a>
### Nested Schema for `source.kafka.schema_registry.credentials`

Required:

- `password` (String, Sensitive) The password for the Schema Registry.
- `username` (String, Sensitive) The username for the Schema Registry.

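A hedged sketch of attaching a schema registry to an `AvroConfluent` pipe; the URL and credentials are placeholders.

```terraform
  schema_registry {
    url            = "https://my-schema-registry.example.com" # placeholder URL
    authentication = "PLAIN"

    credentials {
      username = "registry-user" # placeholder
      password = "***"
    }
  }
```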


<a id="nestedatt--source--object_storage"></a>
### Nested Schema for `source.object_storage`

Required:

- `format` (String) The format of the S3 objects. (`JSONEachRow`, `CSV`, `CSVWithNames`, `Parquet`)
- `url` (String) The URL of the S3 bucket. Provide a path to the file(s) you want to ingest. You can specify multiple files using bash-like wildcards. For more information, see the documentation on using wildcards in paths: https://clickhouse.com/docs/en/integrations/clickpipes/object-storage#limitations

Optional:

- `access_key` (Attributes) Access key (see [below for nested schema](#nestedatt--source--object_storage--access_key))
- `authentication` (String) Authentication method. If not provided, no authentication is used; this can be used to access public buckets. (`IAM_ROLE`, `IAM_USER`)
- `compression` (String) Compression algorithm used for the files. (`auto`, `gzip`, `brotli`, `br`, `xz`, `LZMA`, `zstd`)
- `delimiter` (String) The delimiter for the S3 source. Default is `,`.
- `iam_role` (String) The IAM role for the S3 source. Use with `IAM_ROLE` authentication. It can only be used with ClickHouse services deployed on AWS. Read more on the [ClickPipes documentation page](https://clickhouse.com/docs/en/integrations/clickpipes/object-storage#authentication)
- `is_continuous` (Boolean) If set to `true`, the pipe will continuously read new files from the source. If set to `false`, the pipe will read the files only once. New files have to be uploaded in lexical order.
- `type` (String) The type of the S3-compatible source (`s3`, `gcs`). Default is `s3`.

<a id="nestedatt--source--object_storage--access_key"></a>
### Nested Schema for `source.object_storage.access_key`

Optional:

- `access_key_id` (String, Sensitive) The access key ID for the S3 source. Use with `IAM_USER` authentication.
- `secret_key` (String, Sensitive) The secret key for the S3 source. Use with `IAM_USER` authentication.


<a id="nestedatt--field_mappings"></a>
### Nested Schema for `field_mappings`

Required:

- `destination_field` (String) The name of the column in the destination table.
- `source_field` (String) The name of the source field.


<a id="nestedatt--scaling"></a>
### Nested Schema for `scaling`

Optional:

- `replicas` (Number) The number of desired replicas for the ClickPipe. Default is 1. The maximum value is 10.