diff --git a/gapic-libraries-bom/pom.xml b/gapic-libraries-bom/pom.xml
index 1a4ded39b1e5..84a2f9260ab8 100644
--- a/gapic-libraries-bom/pom.xml
+++ b/gapic-libraries-bom/pom.xml
@@ -4,7 +4,7 @@
   <groupId>com.google.cloud</groupId>
   <artifactId>gapic-libraries-bom</artifactId>
   <packaging>pom</packaging>
-  <version>1.54.0</version>
+  <version>1.54.2</version>
   <name>Google Cloud Java BOM</name>
   <description>
     BOM for the libraries in google-cloud-java repository. Users should not
@@ -15,7 +15,7 @@
     <artifactId>google-cloud-pom-parent</artifactId>
     <groupId>com.google.cloud</groupId>
-    <version>1.54.0</version>
+    <version>1.54.2</version>
     <relativePath>../google-cloud-pom-parent/pom.xml</relativePath>
@@ -729,7 +729,7 @@
       <dependency>
         <groupId>com.google.cloud</groupId>
         <artifactId>google-cloud-managedkafka-bom</artifactId>
-        <version>0.16.0</version>
+        <version>0.16.1</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
@@ -1182,7 +1182,7 @@
       <dependency>
         <groupId>com.google.cloud</groupId>
         <artifactId>google-cloud-vertexai-bom</artifactId>
-        <version>1.20.0</version>
+        <version>1.20.1</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
diff --git a/google-cloud-jar-parent/pom.xml b/google-cloud-jar-parent/pom.xml
index 2e9add5ea4b3..ae089e06dd17 100644
--- a/google-cloud-jar-parent/pom.xml
+++ b/google-cloud-jar-parent/pom.xml
@@ -5,7 +5,7 @@
   <modelVersion>4.0.0</modelVersion>
   <artifactId>google-cloud-jar-parent</artifactId>
   <groupId>com.google.cloud</groupId>
-  <version>1.54.0</version>
+  <version>1.54.2</version>
   <packaging>pom</packaging>
   <name>Google Cloud JAR Parent</name>
@@ -15,7 +15,7 @@
   <parent>
     <groupId>com.google.cloud</groupId>
     <artifactId>google-cloud-pom-parent</artifactId>
-    <version>1.54.0</version>
+    <version>1.54.2</version>
     <relativePath>../google-cloud-pom-parent/pom.xml</relativePath>
diff --git a/google-cloud-pom-parent/pom.xml b/google-cloud-pom-parent/pom.xml
index 159efdc864c7..b569fc68b21f 100644
--- a/google-cloud-pom-parent/pom.xml
+++ b/google-cloud-pom-parent/pom.xml
@@ -5,7 +5,7 @@
  <modelVersion>4.0.0</modelVersion>
  <artifactId>google-cloud-pom-parent</artifactId>
  <groupId>com.google.cloud</groupId>
-  <version>1.54.0</version>
+  <version>1.54.2</version>
  <packaging>pom</packaging>
  <name>Google Cloud POM Parent</name>
  <url>https://github.com/googleapis/google-cloud-java</url>
diff --git a/java-managedkafka/README.md b/java-managedkafka/README.md
index 2a68616b265c..576e6bf00882 100644
--- a/java-managedkafka/README.md
+++ b/java-managedkafka/README.md
@@ -45,20 +45,20 @@ If you are using Maven without the BOM, add this to your dependencies:
 <dependency>
   <groupId>com.google.cloud</groupId>
   <artifactId>google-cloud-managedkafka</artifactId>
-  <version>0.15.0</version>
+  <version>0.16.1</version>
 </dependency>
 ```

 If you are using Gradle without BOM, add this to your dependencies:

 ```Groovy
-implementation 'com.google.cloud:google-cloud-managedkafka:0.15.0'
+implementation 'com.google.cloud:google-cloud-managedkafka:0.16.1'
 ```

 If you are using SBT, add this to your dependencies:

 ```Scala
-libraryDependencies += "com.google.cloud" % "google-cloud-managedkafka" % "0.15.0"
+libraryDependencies += "com.google.cloud" % "google-cloud-managedkafka" % "0.16.1"
 ```

 ## Authentication
@@ -200,7 +200,7 @@ Java is a registered trademark of Oracle and/or its affiliates.
 [kokoro-badge-link-5]: http://storage.googleapis.com/cloud-devrel-public/java/badges/google-cloud-java/java11.html
 [stability-image]: https://img.shields.io/badge/stability-preview-yellow
 [maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-managedkafka.svg
-[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-managedkafka/0.15.0
+[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-managedkafka/0.16.1
 [authentication]: https://github.com/googleapis/google-cloud-java#authentication
 [auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes
 [predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles
diff --git a/java-managedkafka/google-cloud-managedkafka-bom/pom.xml b/java-managedkafka/google-cloud-managedkafka-bom/pom.xml
index da2d9e401285..681814c18268 100644
--- a/java-managedkafka/google-cloud-managedkafka-bom/pom.xml
+++ b/java-managedkafka/google-cloud-managedkafka-bom/pom.xml
@@ -3,12 +3,12 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.google.cloud</groupId>
   <artifactId>google-cloud-managedkafka-bom</artifactId>
-  <version>0.16.0</version>
+  <version>0.16.1</version>
   <packaging>pom</packaging>
   <parent>
     <groupId>com.google.cloud</groupId>
     <artifactId>google-cloud-pom-parent</artifactId>
-    <version>1.54.0</version>
+    <version>1.54.2</version>
     <relativePath>../../google-cloud-pom-parent/pom.xml</relativePath>
   </parent>
@@ -26,17 +26,17 @@
       <dependency>
         <groupId>com.google.cloud</groupId>
         <artifactId>google-cloud-managedkafka</artifactId>
-        <version>0.16.0</version>
+        <version>0.16.1</version>
       </dependency>
       <dependency>
         <groupId>com.google.api.grpc</groupId>
         <artifactId>grpc-google-cloud-managedkafka-v1</artifactId>
-        <version>0.16.0</version>
+        <version>0.16.1</version>
       </dependency>
       <dependency>
         <groupId>com.google.api.grpc</groupId>
         <artifactId>proto-google-cloud-managedkafka-v1</artifactId>
-        <version>0.16.0</version>
+        <version>0.16.1</version>
       </dependency>
diff --git a/java-managedkafka/google-cloud-managedkafka/pom.xml b/java-managedkafka/google-cloud-managedkafka/pom.xml
index 60e9a918a63f..ee4335d84b12 100644
--- a/java-managedkafka/google-cloud-managedkafka/pom.xml
+++ b/java-managedkafka/google-cloud-managedkafka/pom.xml
@@ -3,14 +3,14 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.google.cloud</groupId>
   <artifactId>google-cloud-managedkafka</artifactId>
-  <version>0.16.0</version>
+  <version>0.16.1</version>
   <packaging>jar</packaging>
   <name>Google Managed Service for Apache Kafka API</name>
   <description>Managed Service for Apache Kafka API Manage Apache Kafka clusters and resources.</description>
   <parent>
     <groupId>com.google.cloud</groupId>
     <artifactId>google-cloud-managedkafka-parent</artifactId>
-    <version>0.16.0</version>
+    <version>0.16.1</version>
   </parent>
   <properties>
     <site.installationModule>google-cloud-managedkafka</site.installationModule>
diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectClient.java b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectClient.java
new file mode 100644
index 000000000000..0540fa0692fd
--- /dev/null
+++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectClient.java
@@ -0,0 +1,2818 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.managedkafka.v1;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutures;
+import com.google.api.core.BetaApi;
+import com.google.api.gax.core.BackgroundResource;
+import com.google.api.gax.httpjson.longrunning.OperationsClient;
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.api.gax.paging.AbstractFixedSizeCollection;
+import com.google.api.gax.paging.AbstractPage;
+import com.google.api.gax.paging.AbstractPagedListResponse;
+import com.google.api.gax.rpc.OperationCallable;
+import com.google.api.gax.rpc.PageContext;
+import com.google.api.gax.rpc.UnaryCallable;
+import com.google.cloud.location.GetLocationRequest;
+import com.google.cloud.location.ListLocationsRequest;
+import com.google.cloud.location.ListLocationsResponse;
+import com.google.cloud.location.Location;
+import com.google.cloud.managedkafka.v1.stub.ManagedKafkaConnectStub;
+import com.google.cloud.managedkafka.v1.stub.ManagedKafkaConnectStubSettings;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.longrunning.Operation;
+import com.google.protobuf.Empty;
+import com.google.protobuf.FieldMask;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS.
+/**
+ * Service Description: The service that a client application uses to manage Apache Kafka Connect
+ * clusters and connectors.
+ *
+ * <p>This class provides the ability to make remote calls to the backing service through method
+ * calls that map to API methods. Sample code to get started:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+ *   ConnectClusterName name =
+ *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+ *   ConnectCluster response = managedKafkaConnectClient.getConnectCluster(name);
+ * }
+ * }</pre>
+ *
+ * <p>Note: close() needs to be called on the ManagedKafkaConnectClient object to clean up
+ * resources such as threads. In the example above, try-with-resources is used, which automatically
+ * calls close().
+ *
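Editor's note: the close() contract above matters wherever the client outlives a single try block. A minimal sketch of manual cleanup, assuming only the BackgroundResource surface (shutdown()/awaitTermination()) that this class declares it implements; the 30-second timeout is illustrative:

```java
// Hedged sketch, not part of the generated file: manual cleanup when
// try-with-resources is not an option. shutdown() and awaitTermination()
// come from com.google.api.gax.core.BackgroundResource, which
// ManagedKafkaConnectClient implements.
ManagedKafkaConnectClient client = ManagedKafkaConnectClient.create();
try {
  ConnectClusterName name =
      ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
  ConnectCluster response = client.getConnectCluster(name);
} finally {
  client.shutdown();
  client.awaitTermination(30, TimeUnit.SECONDS); // throws InterruptedException
}
```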
+ * <table>
+ *   <caption>Methods</caption>
+ *   <tr>
+ *     <th>Method</th>
+ *     <th>Description</th>
+ *     <th>Method Variants</th>
+ *   </tr>
+ *   <tr>
+ *     <td><p> ListConnectClusters</td>
+ *     <td><p> Lists the Kafka Connect clusters in a given project and location.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> listConnectClusters(ListConnectClustersRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> listConnectClusters(LocationName parent) <li><p> listConnectClusters(String parent)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> listConnectClustersPagedCallable() <li><p> listConnectClustersCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> GetConnectCluster</td>
+ *     <td><p> Returns the properties of a single Kafka Connect cluster.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> getConnectCluster(GetConnectClusterRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> getConnectCluster(ConnectClusterName name) <li><p> getConnectCluster(String name)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> getConnectClusterCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> CreateConnectCluster</td>
+ *     <td><p> Creates a new Kafka Connect cluster in a given project and location.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> createConnectClusterAsync(CreateConnectClusterRequest request)</ul>
+ *     <p>Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.
+ *     <ul><li><p> createConnectClusterAsync(LocationName parent, ConnectCluster connectCluster, String connectClusterId) <li><p> createConnectClusterAsync(String parent, ConnectCluster connectCluster, String connectClusterId)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> createConnectClusterOperationCallable() <li><p> createConnectClusterCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> UpdateConnectCluster</td>
+ *     <td><p> Updates the properties of a single Kafka Connect cluster.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> updateConnectClusterAsync(UpdateConnectClusterRequest request)</ul>
+ *     <p>Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.
+ *     <ul><li><p> updateConnectClusterAsync(ConnectCluster connectCluster, FieldMask updateMask)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> updateConnectClusterOperationCallable() <li><p> updateConnectClusterCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> DeleteConnectCluster</td>
+ *     <td><p> Deletes a single Connect cluster.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> deleteConnectClusterAsync(DeleteConnectClusterRequest request)</ul>
+ *     <p>Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.
+ *     <ul><li><p> deleteConnectClusterAsync(ConnectClusterName name) <li><p> deleteConnectClusterAsync(String name)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> deleteConnectClusterOperationCallable() <li><p> deleteConnectClusterCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> ListConnectors</td>
+ *     <td><p> Lists the connectors in a given Connect cluster.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> listConnectors(ListConnectorsRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> listConnectors(ConnectClusterName parent) <li><p> listConnectors(String parent)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> listConnectorsPagedCallable() <li><p> listConnectorsCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> GetConnector</td>
+ *     <td><p> Returns the properties of a single connector.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> getConnector(GetConnectorRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> getConnector(ConnectorName name) <li><p> getConnector(String name)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> getConnectorCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> CreateConnector</td>
+ *     <td><p> Creates a new connector in a given Connect cluster.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> createConnector(CreateConnectorRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> createConnector(ConnectClusterName parent, Connector connector, String connectorId) <li><p> createConnector(String parent, Connector connector, String connectorId)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> createConnectorCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> UpdateConnector</td>
+ *     <td><p> Updates the properties of a connector.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> updateConnector(UpdateConnectorRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> updateConnector(Connector connector, FieldMask updateMask)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> updateConnectorCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> DeleteConnector</td>
+ *     <td><p> Deletes a connector.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> deleteConnector(DeleteConnectorRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> deleteConnector(ConnectorName name) <li><p> deleteConnector(String name)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> deleteConnectorCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> PauseConnector</td>
+ *     <td><p> Pauses the connector and its tasks.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> pauseConnector(PauseConnectorRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> pauseConnector(ConnectorName name) <li><p> pauseConnector(String name)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> pauseConnectorCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> ResumeConnector</td>
+ *     <td><p> Resumes the connector and its tasks.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> resumeConnector(ResumeConnectorRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> resumeConnector(ConnectorName name) <li><p> resumeConnector(String name)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> resumeConnectorCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> RestartConnector</td>
+ *     <td><p> Restarts the connector.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> restartConnector(RestartConnectorRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> restartConnector(ConnectorName name) <li><p> restartConnector(String name)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> restartConnectorCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> StopConnector</td>
+ *     <td><p> Stops the connector.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> stopConnector(StopConnectorRequest request)</ul>
+ *     <p>"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.
+ *     <ul><li><p> stopConnector(ConnectorName name) <li><p> stopConnector(String name)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> stopConnectorCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> ListLocations</td>
+ *     <td><p> Lists information about the supported locations for this service.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> listLocations(ListLocationsRequest request)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> listLocationsPagedCallable() <li><p> listLocationsCallable()</ul>
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td><p> GetLocation</td>
+ *     <td><p> Gets information about a location.</td>
+ *     <td>
+ *     <p>Request object method variants only take one parameter, a request object, which must be constructed before the call.
+ *     <ul><li><p> getLocation(GetLocationRequest request)</ul>
+ *     <p>Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
+ *     <ul><li><p> getLocationCallable()</ul>
+ *     </td>
+ *   </tr>
+ * </table>
+ *
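Editor's note: the long-running rows above all funnel into OperationFuture handles. As a brief hedged sketch of what tracking one looks like, using the createConnectClusterAsync variant documented later in this file; the OperationMetadata type parameter and the timeout value are assumptions, since the diff's extraction stripped generics:

```java
// Hedged sketch of polling a long-running operation via its OperationFuture.
// Assumes the com.google.cloud.managedkafka.v1 imports used elsewhere in this
// file; get(timeout, unit) blocks until the operation completes, fails, or
// times out (throwing the usual java.util.concurrent checked exceptions).
try (ManagedKafkaConnectClient client = ManagedKafkaConnectClient.create()) {
  OperationFuture<ConnectCluster, OperationMetadata> future =
      client.createConnectClusterAsync(
          LocationName.of("[PROJECT]", "[LOCATION]"),
          ConnectCluster.newBuilder().build(),
          "my-connect-cluster");
  ConnectCluster created = future.get(30, TimeUnit.MINUTES);
}
```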
+ * <p>See the individual methods for example code.
+ *
+ * <p>Many parameters require resource names to be formatted in a particular way. To assist with
+ * these names, this class includes a format method for each type of name, and additionally a parse
+ * method to extract the individual identifiers contained within names that are returned.
+ *
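Editor's note: a short sketch of the format/parse helpers described above. Only ConnectClusterName.of(...) actually appears in this diff; the parse method and per-segment getters follow the usual gapic resource-name surface and should be treated as assumptions:

```java
// Hedged sketch of the resource-name helpers (assumes the standard generated
// ConnectClusterName surface: of/parse/toString plus per-segment getters).
ConnectClusterName name =
    ConnectClusterName.of("my-project", "us-central1", "my-connect-cluster");
String formatted = name.toString();
// -> "projects/my-project/locations/us-central1/connectClusters/my-connect-cluster"

// parse() recovers the individual identifiers from a name returned by the API.
ConnectClusterName parsed = ConnectClusterName.parse(formatted);
String project = parsed.getProject();         // "my-project"
String location = parsed.getLocation();       // "us-central1"
String cluster = parsed.getConnectCluster();  // "my-connect-cluster"
```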
+ * <p>This class can be customized by passing in a custom instance of ManagedKafkaConnectSettings
+ * to create(). For example:
+ *
+ * <p>To customize credentials:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * ManagedKafkaConnectSettings managedKafkaConnectSettings =
+ *     ManagedKafkaConnectSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * ManagedKafkaConnectClient managedKafkaConnectClient =
+ *     ManagedKafkaConnectClient.create(managedKafkaConnectSettings);
+ * }</pre>
+ *
+ * <p>To customize the endpoint:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * ManagedKafkaConnectSettings managedKafkaConnectSettings =
+ *     ManagedKafkaConnectSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * ManagedKafkaConnectClient managedKafkaConnectClient =
+ *     ManagedKafkaConnectClient.create(managedKafkaConnectSettings);
+ * }</pre>
+ *
+ * <p>To use REST (HTTP1.1/JSON) transport (instead of gRPC) for sending and receiving requests
+ * over the wire:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * ManagedKafkaConnectSettings managedKafkaConnectSettings =
+ *     ManagedKafkaConnectSettings.newHttpJsonBuilder().build();
+ * ManagedKafkaConnectClient managedKafkaConnectClient =
+ *     ManagedKafkaConnectClient.create(managedKafkaConnectSettings);
+ * }</pre>
+ *
+ * <p>Please refer to the GitHub repository's samples for more quickstart code snippets.
+ */
+@Generated("by gapic-generator-java")
+public class ManagedKafkaConnectClient implements BackgroundResource {
+  private final ManagedKafkaConnectSettings settings;
+  private final ManagedKafkaConnectStub stub;
+  private final OperationsClient httpJsonOperationsClient;
+  private final com.google.longrunning.OperationsClient operationsClient;
+
+  /** Constructs an instance of ManagedKafkaConnectClient with default settings. */
+  public static final ManagedKafkaConnectClient create() throws IOException {
+    return create(ManagedKafkaConnectSettings.newBuilder().build());
+  }
+
+  /**
+   * Constructs an instance of ManagedKafkaConnectClient, using the given settings. The channels
+   * are created based on the settings passed in, or defaults for any settings that are not set.
+   */
+  public static final ManagedKafkaConnectClient create(ManagedKafkaConnectSettings settings)
+      throws IOException {
+    return new ManagedKafkaConnectClient(settings);
+  }
+
+  /**
+   * Constructs an instance of ManagedKafkaConnectClient, using the given stub for making calls.
+   * This is for advanced usage - prefer using create(ManagedKafkaConnectSettings).
+   */
+  public static final ManagedKafkaConnectClient create(ManagedKafkaConnectStub stub) {
+    return new ManagedKafkaConnectClient(stub);
+  }
+
+  /**
+   * Constructs an instance of ManagedKafkaConnectClient, using the given settings. This is
+   * protected so that it is easy to make a subclass, but otherwise, the static factory methods
+   * should be preferred.
+   */
+  protected ManagedKafkaConnectClient(ManagedKafkaConnectSettings settings) throws IOException {
+    this.settings = settings;
+    this.stub = ((ManagedKafkaConnectStubSettings) settings.getStubSettings()).createStub();
+    this.operationsClient =
+        com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub());
+    this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub());
+  }
+
+  protected ManagedKafkaConnectClient(ManagedKafkaConnectStub stub) {
+    this.settings = null;
+    this.stub = stub;
+    this.operationsClient =
+        com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub());
+    this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub());
+  }
+
+  public final ManagedKafkaConnectSettings getSettings() {
+    return settings;
+  }
+
+  public ManagedKafkaConnectStub getStub() {
+    return stub;
+  }
+
+  /**
+   * Returns the OperationsClient that can be used to query the status of a long-running operation
+   * returned by another API method call.
+   */
+  public final com.google.longrunning.OperationsClient getOperationsClient() {
+    return operationsClient;
+  }
+
+  /**
+   * Returns the OperationsClient that can be used to query the status of a long-running operation
+   * returned by another API method call.
+   */
+  @BetaApi
+  public final OperationsClient getHttpJsonOperationsClient() {
+    return httpJsonOperationsClient;
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the Kafka Connect clusters in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]");
+   *   for (ConnectCluster element :
+   *       managedKafkaConnectClient.listConnectClusters(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param parent Required. The parent project/location whose Connect clusters are to be listed.
+   *     Structured like `projects/{project}/locations/{location}`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListConnectClustersPagedResponse listConnectClusters(LocationName parent) {
+    ListConnectClustersRequest request =
+        ListConnectClustersRequest.newBuilder()
+            .setParent(parent == null ? null : parent.toString())
+            .build();
+    return listConnectClusters(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the Kafka Connect clusters in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String parent = LocationName.of("[PROJECT]", "[LOCATION]").toString();
+   *   for (ConnectCluster element :
+   *       managedKafkaConnectClient.listConnectClusters(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param parent Required. The parent project/location whose Connect clusters are to be listed.
+   *     Structured like `projects/{project}/locations/{location}`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListConnectClustersPagedResponse listConnectClusters(String parent) {
+    ListConnectClustersRequest request =
+        ListConnectClustersRequest.newBuilder().setParent(parent).build();
+    return listConnectClusters(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the Kafka Connect clusters in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ListConnectClustersRequest request =
+   *       ListConnectClustersRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setFilter("filter-1274492040")
+   *           .setOrderBy("orderBy-1207110587")
+   *           .build();
+   *   for (ConnectCluster element :
+   *       managedKafkaConnectClient.listConnectClusters(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListConnectClustersPagedResponse listConnectClusters(
+      ListConnectClustersRequest request) {
+    return listConnectClustersPagedCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the Kafka Connect clusters in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ListConnectClustersRequest request =
+   *       ListConnectClustersRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setFilter("filter-1274492040")
+   *           .setOrderBy("orderBy-1207110587")
+   *           .build();
+   *   ApiFuture<ConnectCluster> future =
+   *       managedKafkaConnectClient.listConnectClustersPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (ConnectCluster element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ListConnectClustersRequest, ListConnectClustersPagedResponse>
+      listConnectClustersPagedCallable() {
+    return stub.listConnectClustersPagedCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the Kafka Connect clusters in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ListConnectClustersRequest request =
+   *       ListConnectClustersRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setFilter("filter-1274492040")
+   *           .setOrderBy("orderBy-1207110587")
+   *           .build();
+   *   while (true) {
+   *     ListConnectClustersResponse response =
+   *         managedKafkaConnectClient.listConnectClustersCallable().call(request);
+   *     for (ConnectCluster element : response.getConnectClustersList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ListConnectClustersRequest, ListConnectClustersResponse>
+      listConnectClustersCallable() {
+    return stub.listConnectClustersCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Returns the properties of a single Kafka Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectClusterName name =
+   *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+   *   ConnectCluster response = managedKafkaConnectClient.getConnectCluster(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the Kafka Connect cluster whose configuration to return.
+   *     Structured like
+   *     `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ConnectCluster getConnectCluster(ConnectClusterName name) {
+    GetConnectClusterRequest request =
+        GetConnectClusterRequest.newBuilder()
+            .setName(name == null ? null : name.toString())
+            .build();
+    return getConnectCluster(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Returns the properties of a single Kafka Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String name =
+   *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString();
+   *   ConnectCluster response = managedKafkaConnectClient.getConnectCluster(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the Kafka Connect cluster whose configuration to return.
+   *     Structured like
+   *     `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ConnectCluster getConnectCluster(String name) {
+    GetConnectClusterRequest request = GetConnectClusterRequest.newBuilder().setName(name).build();
+    return getConnectCluster(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Returns the properties of a single Kafka Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   GetConnectClusterRequest request =
+   *       GetConnectClusterRequest.newBuilder()
+   *           .setName(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .build();
+   *   ConnectCluster response = managedKafkaConnectClient.getConnectCluster(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ConnectCluster getConnectCluster(GetConnectClusterRequest request) {
+    return getConnectClusterCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Returns the properties of a single Kafka Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   GetConnectClusterRequest request =
+   *       GetConnectClusterRequest.newBuilder()
+   *           .setName(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .build();
+   *   ApiFuture<ConnectCluster> future =
+   *       managedKafkaConnectClient.getConnectClusterCallable().futureCall(request);
+   *   // Do something.
+   *   ConnectCluster response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<GetConnectClusterRequest, ConnectCluster> getConnectClusterCallable() {
+    return stub.getConnectClusterCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates a new Kafka Connect cluster in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]");
+   *   ConnectCluster connectCluster = ConnectCluster.newBuilder().build();
+   *   String connectClusterId = "connectClusterId-1562078485";
+   *   ConnectCluster response =
+   *       managedKafkaConnectClient
+   *           .createConnectClusterAsync(parent, connectCluster, connectClusterId)
+   *           .get();
+   * }
+   * }</pre>
+   *
+   * @param parent Required. The parent project/location in which to create the Kafka Connect
+   *     cluster. Structured like `projects/{project}/locations/{location}/`.
+   * @param connectCluster Required. Configuration of the Kafka Connect cluster to create. Its
+   *     `name` field is ignored.
+   * @param connectClusterId Required. The ID to use for the Connect cluster, which will become the
+   *     final component of the cluster's name. The ID must be 1-63 characters long, and match the
+   *     regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with RFC 1035.
+   *     <p>This value is structured like: `my-cluster-id`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final OperationFuture<ConnectCluster, OperationMetadata> createConnectClusterAsync(
+      LocationName parent, ConnectCluster connectCluster, String connectClusterId) {
+    CreateConnectClusterRequest request =
+        CreateConnectClusterRequest.newBuilder()
+            .setParent(parent == null ? null : parent.toString())
+            .setConnectCluster(connectCluster)
+            .setConnectClusterId(connectClusterId)
+            .build();
+    return createConnectClusterAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates a new Kafka Connect cluster in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String parent = LocationName.of("[PROJECT]", "[LOCATION]").toString();
+   *   ConnectCluster connectCluster = ConnectCluster.newBuilder().build();
+   *   String connectClusterId = "connectClusterId-1562078485";
+   *   ConnectCluster response =
+   *       managedKafkaConnectClient
+   *           .createConnectClusterAsync(parent, connectCluster, connectClusterId)
+   *           .get();
+   * }
+   * }</pre>
+   *
+   * @param parent Required. The parent project/location in which to create the Kafka Connect
+   *     cluster. Structured like `projects/{project}/locations/{location}/`.
+   * @param connectCluster Required. Configuration of the Kafka Connect cluster to create. Its
+   *     `name` field is ignored.
+   * @param connectClusterId Required. The ID to use for the Connect cluster, which will become the
+   *     final component of the cluster's name. The ID must be 1-63 characters long, and match the
+   *     regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with RFC 1035.
+   *     <p>This value is structured like: `my-cluster-id`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final OperationFuture<ConnectCluster, OperationMetadata> createConnectClusterAsync(
+      String parent, ConnectCluster connectCluster, String connectClusterId) {
+    CreateConnectClusterRequest request =
+        CreateConnectClusterRequest.newBuilder()
+            .setParent(parent)
+            .setConnectCluster(connectCluster)
+            .setConnectClusterId(connectClusterId)
+            .build();
+    return createConnectClusterAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates a new Kafka Connect cluster in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   CreateConnectClusterRequest request =
+   *       CreateConnectClusterRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setConnectClusterId("connectClusterId-1562078485")
+   *           .setConnectCluster(ConnectCluster.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ConnectCluster response = managedKafkaConnectClient.createConnectClusterAsync(request).get();
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final OperationFuture<ConnectCluster, OperationMetadata> createConnectClusterAsync(
+      CreateConnectClusterRequest request) {
+    return createConnectClusterOperationCallable().futureCall(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates a new Kafka Connect cluster in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   CreateConnectClusterRequest request =
+   *       CreateConnectClusterRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setConnectClusterId("connectClusterId-1562078485")
+   *           .setConnectCluster(ConnectCluster.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture<ConnectCluster, OperationMetadata> future =
+   *       managedKafkaConnectClient.createConnectClusterOperationCallable().futureCall(request);
+   *   // Do something.
+   *   ConnectCluster response = future.get();
+   * }
+   * }</pre>
+   */
+  public final OperationCallable<CreateConnectClusterRequest, ConnectCluster, OperationMetadata>
+      createConnectClusterOperationCallable() {
+    return stub.createConnectClusterOperationCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates a new Kafka Connect cluster in a given project and location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   CreateConnectClusterRequest request =
+   *       CreateConnectClusterRequest.newBuilder()
+   *           .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+   *           .setConnectClusterId("connectClusterId-1562078485")
+   *           .setConnectCluster(ConnectCluster.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture<Operation> future =
+   *       managedKafkaConnectClient.createConnectClusterCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<CreateConnectClusterRequest, Operation>
+      createConnectClusterCallable() {
+    return stub.createConnectClusterCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates the properties of a single Kafka Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectCluster connectCluster = ConnectCluster.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   ConnectCluster response =
+   *       managedKafkaConnectClient.updateConnectClusterAsync(connectCluster, updateMask).get();
+   * }
+   * }</pre>
+   *
+   * @param connectCluster Required. The Kafka Connect cluster to update. Its `name` field must be
+   *     populated.
+   * @param updateMask Required. Field mask is used to specify the fields to be overwritten in the
+   *     cluster resource by the update. The fields specified in the update_mask are relative to
+   *     the resource, not the full request. A field will be overwritten if it is in the mask. The
+   *     mask is required and a value of * will update all fields.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final OperationFuture<ConnectCluster, OperationMetadata> updateConnectClusterAsync(
+      ConnectCluster connectCluster, FieldMask updateMask) {
+    UpdateConnectClusterRequest request =
+        UpdateConnectClusterRequest.newBuilder()
+            .setConnectCluster(connectCluster)
+            .setUpdateMask(updateMask)
+            .build();
+    return updateConnectClusterAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates the properties of a single Kafka Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   UpdateConnectClusterRequest request =
+   *       UpdateConnectClusterRequest.newBuilder()
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setConnectCluster(ConnectCluster.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ConnectCluster response = managedKafkaConnectClient.updateConnectClusterAsync(request).get();
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final OperationFuture<ConnectCluster, OperationMetadata> updateConnectClusterAsync(
+      UpdateConnectClusterRequest request) {
+    return updateConnectClusterOperationCallable().futureCall(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates the properties of a single Kafka Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   UpdateConnectClusterRequest request =
+   *       UpdateConnectClusterRequest.newBuilder()
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setConnectCluster(ConnectCluster.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture<ConnectCluster, OperationMetadata> future =
+   *       managedKafkaConnectClient.updateConnectClusterOperationCallable().futureCall(request);
+   *   // Do something.
+   *   ConnectCluster response = future.get();
+   * }
+   * }</pre>
+   */
+  public final OperationCallable<UpdateConnectClusterRequest, ConnectCluster, OperationMetadata>
+      updateConnectClusterOperationCallable() {
+    return stub.updateConnectClusterOperationCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates the properties of a single Kafka Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   UpdateConnectClusterRequest request =
+   *       UpdateConnectClusterRequest.newBuilder()
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setConnectCluster(ConnectCluster.newBuilder().build())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture<Operation> future =
+   *       managedKafkaConnectClient.updateConnectClusterCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<UpdateConnectClusterRequest, Operation>
+      updateConnectClusterCallable() {
+    return stub.updateConnectClusterCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes a single Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectClusterName name =
+   *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+   *   managedKafkaConnectClient.deleteConnectClusterAsync(name).get();
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the Kafka Connect cluster to delete. Structured like
+   *     `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final OperationFuture<Empty, OperationMetadata> deleteConnectClusterAsync(
+      ConnectClusterName name) {
+    DeleteConnectClusterRequest request =
+        DeleteConnectClusterRequest.newBuilder()
+            .setName(name == null ? null : name.toString())
+            .build();
+    return deleteConnectClusterAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes a single Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String name =
+   *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString();
+   *   managedKafkaConnectClient.deleteConnectClusterAsync(name).get();
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the Kafka Connect cluster to delete. Structured like
+   *     `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final OperationFuture<Empty, OperationMetadata> deleteConnectClusterAsync(String name) {
+    DeleteConnectClusterRequest request =
+        DeleteConnectClusterRequest.newBuilder().setName(name).build();
+    return deleteConnectClusterAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes a single Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   DeleteConnectClusterRequest request =
+   *       DeleteConnectClusterRequest.newBuilder()
+   *           .setName(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   managedKafkaConnectClient.deleteConnectClusterAsync(request).get();
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final OperationFuture<Empty, OperationMetadata> deleteConnectClusterAsync(
+      DeleteConnectClusterRequest request) {
+    return deleteConnectClusterOperationCallable().futureCall(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes a single Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   DeleteConnectClusterRequest request =
+   *       DeleteConnectClusterRequest.newBuilder()
+   *           .setName(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   OperationFuture<Empty, OperationMetadata> future =
+   *       managedKafkaConnectClient.deleteConnectClusterOperationCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }</pre>
+   */
+  public final OperationCallable<DeleteConnectClusterRequest, Empty, OperationMetadata>
+      deleteConnectClusterOperationCallable() {
+    return stub.deleteConnectClusterOperationCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes a single Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   DeleteConnectClusterRequest request =
+   *       DeleteConnectClusterRequest.newBuilder()
+   *           .setName(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .setRequestId("requestId693933066")
+   *           .build();
+   *   ApiFuture<Operation> future =
+   *       managedKafkaConnectClient.deleteConnectClusterCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<DeleteConnectClusterRequest, Operation>
+      deleteConnectClusterCallable() {
+    return stub.deleteConnectClusterCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the connectors in a given Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectClusterName parent =
+   *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+   *   for (Connector element : managedKafkaConnectClient.listConnectors(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param parent Required. The parent Connect cluster whose connectors are to be listed.
+   *     Structured like
+   *     `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListConnectorsPagedResponse listConnectors(ConnectClusterName parent) {
+    ListConnectorsRequest request =
+        ListConnectorsRequest.newBuilder()
+            .setParent(parent == null ? null : parent.toString())
+            .build();
+    return listConnectors(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the connectors in a given Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String parent =
+   *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString();
+   *   for (Connector element : managedKafkaConnectClient.listConnectors(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param parent Required. The parent Connect cluster whose connectors are to be listed.
+   *     Structured like
+   *     `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListConnectorsPagedResponse listConnectors(String parent) {
+    ListConnectorsRequest request = ListConnectorsRequest.newBuilder().setParent(parent).build();
+    return listConnectors(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the connectors in a given Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ListConnectorsRequest request =
+   *       ListConnectorsRequest.newBuilder()
+   *           .setParent(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (Connector element : managedKafkaConnectClient.listConnectors(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListConnectorsPagedResponse listConnectors(ListConnectorsRequest request) {
+    return listConnectorsPagedCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the connectors in a given Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ListConnectorsRequest request =
+   *       ListConnectorsRequest.newBuilder()
+   *           .setParent(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture<Connector> future =
+   *       managedKafkaConnectClient.listConnectorsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Connector element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ListConnectorsRequest, ListConnectorsPagedResponse>
+      listConnectorsPagedCallable() {
+    return stub.listConnectorsPagedCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists the connectors in a given Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ListConnectorsRequest request =
+   *       ListConnectorsRequest.newBuilder()
+   *           .setParent(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListConnectorsResponse response =
+   *         managedKafkaConnectClient.listConnectorsCallable().call(request);
+   *     for (Connector element : response.getConnectorsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ListConnectorsRequest, ListConnectorsResponse>
+      listConnectorsCallable() {
+    return stub.listConnectorsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Returns the properties of a single connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectorName name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+   *   Connector response = managedKafkaConnectClient.getConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector whose configuration to return. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Connector getConnector(ConnectorName name) {
+    GetConnectorRequest request =
+        GetConnectorRequest.newBuilder().setName(name == null ? null : name.toString()).build();
+    return getConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Returns the properties of a single connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *           .toString();
+   *   Connector response = managedKafkaConnectClient.getConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector whose configuration to return. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Connector getConnector(String name) {
+    GetConnectorRequest request = GetConnectorRequest.newBuilder().setName(name).build();
+    return getConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Returns the properties of a single connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   GetConnectorRequest request =
+   *       GetConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   Connector response = managedKafkaConnectClient.getConnector(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Connector getConnector(GetConnectorRequest request) {
+    return getConnectorCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Returns the properties of a single connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   GetConnectorRequest request =
+   *       GetConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture<Connector> future =
+   *       managedKafkaConnectClient.getConnectorCallable().futureCall(request);
+   *   // Do something.
+   *   Connector response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<GetConnectorRequest, Connector> getConnectorCallable() {
+    return stub.getConnectorCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates a new connector in a given Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectClusterName parent =
+   *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+   *   Connector connector = Connector.newBuilder().build();
+   *   String connectorId = "connectorId1724784200";
+   *   Connector response =
+   *       managedKafkaConnectClient.createConnector(parent, connector, connectorId);
+   * }
+   * }</pre>
+   *
+   * @param parent Required. The parent Connect cluster in which to create the connector. Structured
+   *     like `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * @param connector Required. The connector to create.
+   * @param connectorId Required. The ID to use for the connector, which will become the final
+   *     component of the connector's name. The ID must be 1-63 characters long, and match the
+   *     regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with RFC 1035.
+   *     <p>This value is structured like: `my-connector-id`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Connector createConnector(
+      ConnectClusterName parent, Connector connector, String connectorId) {
+    CreateConnectorRequest request =
+        CreateConnectorRequest.newBuilder()
+            .setParent(parent == null ? null : parent.toString())
+            .setConnector(connector)
+            .setConnectorId(connectorId)
+            .build();
+    return createConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates a new connector in a given Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String parent =
+   *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString();
+   *   Connector connector = Connector.newBuilder().build();
+   *   String connectorId = "connectorId1724784200";
+   *   Connector response =
+   *       managedKafkaConnectClient.createConnector(parent, connector, connectorId);
+   * }
+   * }</pre>
+   *
+   * @param parent Required. The parent Connect cluster in which to create the connector. Structured
+   *     like `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * @param connector Required. The connector to create.
+   * @param connectorId Required. The ID to use for the connector, which will become the final
+   *     component of the connector's name. The ID must be 1-63 characters long, and match the
+   *     regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with RFC 1035.
+   *     <p>This value is structured like: `my-connector-id`.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Connector createConnector(String parent, Connector connector, String connectorId) {
+    CreateConnectorRequest request =
+        CreateConnectorRequest.newBuilder()
+            .setParent(parent)
+            .setConnector(connector)
+            .setConnectorId(connectorId)
+            .build();
+    return createConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates a new connector in a given Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   CreateConnectorRequest request =
+   *       CreateConnectorRequest.newBuilder()
+   *           .setParent(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .setConnectorId("connectorId1724784200")
+   *           .setConnector(Connector.newBuilder().build())
+   *           .build();
+   *   Connector response = managedKafkaConnectClient.createConnector(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Connector createConnector(CreateConnectorRequest request) {
+    return createConnectorCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Creates a new connector in a given Connect cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   CreateConnectorRequest request =
+   *       CreateConnectorRequest.newBuilder()
+   *           .setParent(
+   *               ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+   *           .setConnectorId("connectorId1724784200")
+   *           .setConnector(Connector.newBuilder().build())
+   *           .build();
+   *   ApiFuture<Connector> future =
+   *       managedKafkaConnectClient.createConnectorCallable().futureCall(request);
+   *   // Do something.
+   *   Connector response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<CreateConnectorRequest, Connector> createConnectorCallable() {
+    return stub.createConnectorCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates the properties of a connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   Connector connector = Connector.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   Connector response = managedKafkaConnectClient.updateConnector(connector, updateMask);
+   * }
+   * }</pre>
+   *
+   * @param connector Required. The connector to update. Its `name` field must be populated.
+   * @param updateMask Required. Field mask is used to specify the fields to be overwritten in the
+   *     cluster resource by the update. The fields specified in the update_mask are relative to the
+   *     resource, not the full request. A field will be overwritten if it is in the mask. The mask
+   *     is required and a value of * will update all fields.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Connector updateConnector(Connector connector, FieldMask updateMask) {
+    UpdateConnectorRequest request =
+        UpdateConnectorRequest.newBuilder()
+            .setConnector(connector)
+            .setUpdateMask(updateMask)
+            .build();
+    return updateConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates the properties of a connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   UpdateConnectorRequest request =
+   *       UpdateConnectorRequest.newBuilder()
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setConnector(Connector.newBuilder().build())
+   *           .build();
+   *   Connector response = managedKafkaConnectClient.updateConnector(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Connector updateConnector(UpdateConnectorRequest request) {
+    return updateConnectorCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Updates the properties of a connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   UpdateConnectorRequest request =
+   *       UpdateConnectorRequest.newBuilder()
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setConnector(Connector.newBuilder().build())
+   *           .build();
+   *   ApiFuture<Connector> future =
+   *       managedKafkaConnectClient.updateConnectorCallable().futureCall(request);
+   *   // Do something.
+   *   Connector response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<UpdateConnectorRequest, Connector> updateConnectorCallable() {
+    return stub.updateConnectorCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes a connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectorName name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+   *   managedKafkaConnectClient.deleteConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to delete. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final void deleteConnector(ConnectorName name) {
+    DeleteConnectorRequest request =
+        DeleteConnectorRequest.newBuilder().setName(name == null ? null : name.toString()).build();
+    deleteConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes a connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *           .toString();
+   *   managedKafkaConnectClient.deleteConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to delete. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final void deleteConnector(String name) {
+    DeleteConnectorRequest request = DeleteConnectorRequest.newBuilder().setName(name).build();
+    deleteConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes a connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   DeleteConnectorRequest request =
+   *       DeleteConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   managedKafkaConnectClient.deleteConnector(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final void deleteConnector(DeleteConnectorRequest request) {
+    deleteConnectorCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Deletes a connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   DeleteConnectorRequest request =
+   *       DeleteConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture<Empty> future =
+   *       managedKafkaConnectClient.deleteConnectorCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<DeleteConnectorRequest, Empty> deleteConnectorCallable() {
+    return stub.deleteConnectorCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Pauses the connector and its tasks.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectorName name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+   *   PauseConnectorResponse response = managedKafkaConnectClient.pauseConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to pause. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final PauseConnectorResponse pauseConnector(ConnectorName name) {
+    PauseConnectorRequest request =
+        PauseConnectorRequest.newBuilder().setName(name == null ? null : name.toString()).build();
+    return pauseConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Pauses the connector and its tasks.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *           .toString();
+   *   PauseConnectorResponse response = managedKafkaConnectClient.pauseConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to pause. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final PauseConnectorResponse pauseConnector(String name) {
+    PauseConnectorRequest request = PauseConnectorRequest.newBuilder().setName(name).build();
+    return pauseConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Pauses the connector and its tasks.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   PauseConnectorRequest request =
+   *       PauseConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   PauseConnectorResponse response = managedKafkaConnectClient.pauseConnector(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final PauseConnectorResponse pauseConnector(PauseConnectorRequest request) {
+    return pauseConnectorCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Pauses the connector and its tasks.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   PauseConnectorRequest request =
+   *       PauseConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture<PauseConnectorResponse> future =
+   *       managedKafkaConnectClient.pauseConnectorCallable().futureCall(request);
+   *   // Do something.
+   *   PauseConnectorResponse response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<PauseConnectorRequest, PauseConnectorResponse>
+      pauseConnectorCallable() {
+    return stub.pauseConnectorCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Resumes the connector and its tasks.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectorName name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+   *   ResumeConnectorResponse response = managedKafkaConnectClient.resumeConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to pause. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ResumeConnectorResponse resumeConnector(ConnectorName name) {
+    ResumeConnectorRequest request =
+        ResumeConnectorRequest.newBuilder().setName(name == null ? null : name.toString()).build();
+    return resumeConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Resumes the connector and its tasks.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *           .toString();
+   *   ResumeConnectorResponse response = managedKafkaConnectClient.resumeConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to pause. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ResumeConnectorResponse resumeConnector(String name) {
+    ResumeConnectorRequest request = ResumeConnectorRequest.newBuilder().setName(name).build();
+    return resumeConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Resumes the connector and its tasks.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ResumeConnectorRequest request =
+   *       ResumeConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   ResumeConnectorResponse response = managedKafkaConnectClient.resumeConnector(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ResumeConnectorResponse resumeConnector(ResumeConnectorRequest request) {
+    return resumeConnectorCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Resumes the connector and its tasks.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ResumeConnectorRequest request =
+   *       ResumeConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture<ResumeConnectorResponse> future =
+   *       managedKafkaConnectClient.resumeConnectorCallable().futureCall(request);
+   *   // Do something.
+   *   ResumeConnectorResponse response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ResumeConnectorRequest, ResumeConnectorResponse>
+      resumeConnectorCallable() {
+    return stub.resumeConnectorCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Restarts the connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectorName name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+   *   RestartConnectorResponse response = managedKafkaConnectClient.restartConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to restart. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final RestartConnectorResponse restartConnector(ConnectorName name) {
+    RestartConnectorRequest request =
+        RestartConnectorRequest.newBuilder().setName(name == null ? null : name.toString()).build();
+    return restartConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Restarts the connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *           .toString();
+   *   RestartConnectorResponse response = managedKafkaConnectClient.restartConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to restart. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final RestartConnectorResponse restartConnector(String name) {
+    RestartConnectorRequest request = RestartConnectorRequest.newBuilder().setName(name).build();
+    return restartConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Restarts the connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   RestartConnectorRequest request =
+   *       RestartConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   RestartConnectorResponse response = managedKafkaConnectClient.restartConnector(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final RestartConnectorResponse restartConnector(RestartConnectorRequest request) {
+    return restartConnectorCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Restarts the connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   RestartConnectorRequest request =
+   *       RestartConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture<RestartConnectorResponse> future =
+   *       managedKafkaConnectClient.restartConnectorCallable().futureCall(request);
+   *   // Do something.
+   *   RestartConnectorResponse response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<RestartConnectorRequest, RestartConnectorResponse>
+      restartConnectorCallable() {
+    return stub.restartConnectorCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Stops the connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ConnectorName name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+   *   StopConnectorResponse response = managedKafkaConnectClient.stopConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to stop. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final StopConnectorResponse stopConnector(ConnectorName name) {
+    StopConnectorRequest request =
+        StopConnectorRequest.newBuilder().setName(name == null ? null : name.toString()).build();
+    return stopConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Stops the connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   String name =
+   *       ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *           .toString();
+   *   StopConnectorResponse response = managedKafkaConnectClient.stopConnector(name);
+   * }
+   * }</pre>
+   *
+   * @param name Required. The name of the connector to stop. Structured like:
+   *     projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final StopConnectorResponse stopConnector(String name) {
+    StopConnectorRequest request = StopConnectorRequest.newBuilder().setName(name).build();
+    return stopConnector(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Stops the connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   StopConnectorRequest request =
+   *       StopConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   StopConnectorResponse response = managedKafkaConnectClient.stopConnector(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final StopConnectorResponse stopConnector(StopConnectorRequest request) {
+    return stopConnectorCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Stops the connector.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   StopConnectorRequest request =
+   *       StopConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture<StopConnectorResponse> future =
+   *       managedKafkaConnectClient.stopConnectorCallable().futureCall(request);
+   *   // Do something.
+   *   StopConnectorResponse response = future.get();
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<StopConnectorRequest, StopConnectorResponse> stopConnectorCallable() {
+    return stub.stopConnectorCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists information about the supported locations for this service.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ListLocationsRequest request =
+   *       ListLocationsRequest.newBuilder()
+   *           .setName("name3373707")
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (Location element : managedKafkaConnectClient.listLocations(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListLocationsPagedResponse listLocations(ListLocationsRequest request) {
+    return listLocationsPagedCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists information about the supported locations for this service.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ListLocationsRequest request =
+   *       ListLocationsRequest.newBuilder()
+   *           .setName("name3373707")
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture<Location> future =
+   *       managedKafkaConnectClient.listLocationsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Location element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ListLocationsRequest, ListLocationsPagedResponse>
+      listLocationsPagedCallable() {
+    return stub.listLocationsPagedCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Lists information about the supported locations for this service.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   ListLocationsRequest request =
+   *       ListLocationsRequest.newBuilder()
+   *           .setName("name3373707")
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListLocationsResponse response =
+   *         managedKafkaConnectClient.listLocationsCallable().call(request);
+   *     for (Location element : response.getLocationsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }</pre>
+   */
+  public final UnaryCallable<ListLocationsRequest, ListLocationsResponse> listLocationsCallable() {
+    return stub.listLocationsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Gets information about a location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   GetLocationRequest request = GetLocationRequest.newBuilder().setName("name3373707").build();
+   *   Location response = managedKafkaConnectClient.getLocation(request);
+   * }
+   * }</pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Location getLocation(GetLocationRequest request) {
+    return getLocationCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD.
+  /**
+   * Gets information about a location.
+   *
+   * <p>Sample code:
+   *
+   * <pre>{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+   *   GetLocationRequest request = GetLocationRequest.newBuilder().setName("name3373707").build();
+   *   ApiFuture<Location> future =
+   *       managedKafkaConnectClient.getLocationCallable().futureCall(request);
+   *   // Do something.
+   *   Location response = future.get();
+   * }
+   * }</pre>
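+   *
+   * <p>Editorial sketch, not generator output: a bounded wait on the future avoids blocking
+   * indefinitely on a stuck call (the 30-second timeout is an arbitrary example value):
+   *
+   * <pre>{@code
+   * Location location = future.get(30, TimeUnit.SECONDS);
+   * }</pre>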
+ */ + public final UnaryCallable getLocationCallable() { + return stub.getLocationCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } + + public static class ListConnectClustersPagedResponse + extends AbstractPagedListResponse< + ListConnectClustersRequest, + ListConnectClustersResponse, + ConnectCluster, + ListConnectClustersPage, + ListConnectClustersFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext + context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListConnectClustersPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListConnectClustersPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListConnectClustersPagedResponse(ListConnectClustersPage page) { + super(page, ListConnectClustersFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListConnectClustersPage + extends AbstractPage< + ListConnectClustersRequest, + ListConnectClustersResponse, + ConnectCluster, + ListConnectClustersPage> { + + private ListConnectClustersPage( + PageContext + context, + ListConnectClustersResponse response) { + super(context, response); + } + + private static ListConnectClustersPage createEmptyPage() { + return new ListConnectClustersPage(null, null); + } + + @Override + protected ListConnectClustersPage createPage( + PageContext + context, + ListConnectClustersResponse response) { + return new ListConnectClustersPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext + context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListConnectClustersFixedSizeCollection + extends AbstractFixedSizeCollection< + ListConnectClustersRequest, + ListConnectClustersResponse, + ConnectCluster, + ListConnectClustersPage, + ListConnectClustersFixedSizeCollection> { + + private ListConnectClustersFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListConnectClustersFixedSizeCollection createEmptyCollection() { + return new ListConnectClustersFixedSizeCollection(null, 0); + } + + @Override + protected ListConnectClustersFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListConnectClustersFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListConnectorsPagedResponse + extends AbstractPagedListResponse< + ListConnectorsRequest, + ListConnectorsResponse, + Connector, + ListConnectorsPage, + ListConnectorsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListConnectorsPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListConnectorsPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListConnectorsPagedResponse(ListConnectorsPage page) { + super(page, 
ListConnectorsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListConnectorsPage + extends AbstractPage< + ListConnectorsRequest, ListConnectorsResponse, Connector, ListConnectorsPage> { + + private ListConnectorsPage( + PageContext context, + ListConnectorsResponse response) { + super(context, response); + } + + private static ListConnectorsPage createEmptyPage() { + return new ListConnectorsPage(null, null); + } + + @Override + protected ListConnectorsPage createPage( + PageContext context, + ListConnectorsResponse response) { + return new ListConnectorsPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListConnectorsFixedSizeCollection + extends AbstractFixedSizeCollection< + ListConnectorsRequest, + ListConnectorsResponse, + Connector, + ListConnectorsPage, + ListConnectorsFixedSizeCollection> { + + private ListConnectorsFixedSizeCollection(List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListConnectorsFixedSizeCollection createEmptyCollection() { + return new ListConnectorsFixedSizeCollection(null, 0); + } + + @Override + protected ListConnectorsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListConnectorsFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListLocationsPagedResponse + extends AbstractPagedListResponse< + ListLocationsRequest, + ListLocationsResponse, + Location, + ListLocationsPage, + ListLocationsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListLocationsPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListLocationsPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListLocationsPagedResponse(ListLocationsPage page) { + super(page, ListLocationsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListLocationsPage + extends AbstractPage< + ListLocationsRequest, ListLocationsResponse, Location, ListLocationsPage> { + + private ListLocationsPage( + PageContext context, + ListLocationsResponse response) { + super(context, response); + } + + private static ListLocationsPage createEmptyPage() { + return new ListLocationsPage(null, null); + } + + @Override + protected ListLocationsPage createPage( + PageContext context, + ListLocationsResponse response) { + return new ListLocationsPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListLocationsFixedSizeCollection + extends AbstractFixedSizeCollection< + ListLocationsRequest, + ListLocationsResponse, + Location, + ListLocationsPage, + ListLocationsFixedSizeCollection> { + + private ListLocationsFixedSizeCollection(List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListLocationsFixedSizeCollection createEmptyCollection() { + return new ListLocationsFixedSizeCollection(null, 0); + } + + @Override + protected ListLocationsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListLocationsFixedSizeCollection(pages, collectionSize); + } + } +} diff --git 
a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectSettings.java b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectSettings.java new file mode 100644 index 000000000000..9156a1704c0b --- /dev/null +++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectSettings.java @@ -0,0 +1,468 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1; + +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectClustersPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectorsPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListLocationsPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.location.GetLocationRequest; +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.ListLocationsResponse; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.stub.ManagedKafkaConnectStubSettings; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link ManagedKafkaConnectClient}. + * + *

+ * <p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (managedkafka.googleapis.com) and default port (443) are used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the
+ * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings)
+ * of getConnectCluster:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * ManagedKafkaConnectSettings.Builder managedKafkaConnectSettingsBuilder =
+ *     ManagedKafkaConnectSettings.newBuilder();
+ * managedKafkaConnectSettingsBuilder
+ *     .getConnectClusterSettings()
+ *     .setRetrySettings(
+ *         managedKafkaConnectSettingsBuilder
+ *             .getConnectClusterSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * ManagedKafkaConnectSettings managedKafkaConnectSettings =
+ *     managedKafkaConnectSettingsBuilder.build();
+ * }</pre>
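+ *
+ * <p>Editorial sketch, not generator output: the builder also exposes applyToAllUnaryMethods,
+ * which applies one updater across every unary RPC instead of configuring each method
+ * individually (the retryable-code choice below is an example, not a recommendation):
+ *
+ * <pre>{@code
+ * ManagedKafkaConnectSettings.Builder builder = ManagedKafkaConnectSettings.newBuilder();
+ * builder.applyToAllUnaryMethods(
+ *     callSettings -> {
+ *       // Retry only on UNAVAILABLE across all unary methods.
+ *       callSettings.setRetryableCodes(StatusCode.Code.UNAVAILABLE);
+ *       return null;
+ *     });
+ * }</pre>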
+ *
+ * Please refer to the [Client Side Retry
+ * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for
+ * additional support in setting retries.
+ *
+ * <p>To configure the RetrySettings of a Long Running Operation method, create an
+ * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to
+ * configure the RetrySettings for createConnectCluster:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * ManagedKafkaConnectSettings.Builder managedKafkaConnectSettingsBuilder =
+ *     ManagedKafkaConnectSettings.newBuilder();
+ * TimedRetryAlgorithm timedRetryAlgorithm =
+ *     OperationTimedPollAlgorithm.create(
+ *         RetrySettings.newBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
+ *             .setRetryDelayMultiplier(1.5)
+ *             .setMaxRetryDelayDuration(Duration.ofMillis(5000))
+ *             .setTotalTimeoutDuration(Duration.ofHours(24))
+ *             .build());
+ * managedKafkaConnectSettingsBuilder
+ *     .createConnectClusterOperationSettings()
+ *     .setPollingAlgorithm(timedRetryAlgorithm)
+ *     .build();
+ * }</pre>
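+ *
+ * <p>Editorial sketch, not generator output: once configured, the settings object is passed to
+ * the client's factory method:
+ *
+ * <pre>{@code
+ * ManagedKafkaConnectSettings managedKafkaConnectSettings =
+ *     managedKafkaConnectSettingsBuilder.build();
+ * ManagedKafkaConnectClient managedKafkaConnectClient =
+ *     ManagedKafkaConnectClient.create(managedKafkaConnectSettings);
+ * }</pre>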
+ */ +@Generated("by gapic-generator-java") +public class ManagedKafkaConnectSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to listConnectClusters. */ + public PagedCallSettings< + ListConnectClustersRequest, ListConnectClustersResponse, ListConnectClustersPagedResponse> + listConnectClustersSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).listConnectClustersSettings(); + } + + /** Returns the object with the settings used for calls to getConnectCluster. */ + public UnaryCallSettings getConnectClusterSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).getConnectClusterSettings(); + } + + /** Returns the object with the settings used for calls to createConnectCluster. */ + public UnaryCallSettings createConnectClusterSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).createConnectClusterSettings(); + } + + /** Returns the object with the settings used for calls to createConnectCluster. */ + public OperationCallSettings + createConnectClusterOperationSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()) + .createConnectClusterOperationSettings(); + } + + /** Returns the object with the settings used for calls to updateConnectCluster. */ + public UnaryCallSettings updateConnectClusterSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).updateConnectClusterSettings(); + } + + /** Returns the object with the settings used for calls to updateConnectCluster. */ + public OperationCallSettings + updateConnectClusterOperationSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()) + .updateConnectClusterOperationSettings(); + } + + /** Returns the object with the settings used for calls to deleteConnectCluster. */ + public UnaryCallSettings deleteConnectClusterSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).deleteConnectClusterSettings(); + } + + /** Returns the object with the settings used for calls to deleteConnectCluster. */ + public OperationCallSettings + deleteConnectClusterOperationSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()) + .deleteConnectClusterOperationSettings(); + } + + /** Returns the object with the settings used for calls to listConnectors. */ + public PagedCallSettings< + ListConnectorsRequest, ListConnectorsResponse, ListConnectorsPagedResponse> + listConnectorsSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).listConnectorsSettings(); + } + + /** Returns the object with the settings used for calls to getConnector. */ + public UnaryCallSettings getConnectorSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).getConnectorSettings(); + } + + /** Returns the object with the settings used for calls to createConnector. */ + public UnaryCallSettings createConnectorSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).createConnectorSettings(); + } + + /** Returns the object with the settings used for calls to updateConnector. */ + public UnaryCallSettings updateConnectorSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).updateConnectorSettings(); + } + + /** Returns the object with the settings used for calls to deleteConnector. 
*/ + public UnaryCallSettings deleteConnectorSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).deleteConnectorSettings(); + } + + /** Returns the object with the settings used for calls to pauseConnector. */ + public UnaryCallSettings pauseConnectorSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).pauseConnectorSettings(); + } + + /** Returns the object with the settings used for calls to resumeConnector. */ + public UnaryCallSettings + resumeConnectorSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).resumeConnectorSettings(); + } + + /** Returns the object with the settings used for calls to restartConnector. */ + public UnaryCallSettings + restartConnectorSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).restartConnectorSettings(); + } + + /** Returns the object with the settings used for calls to stopConnector. */ + public UnaryCallSettings stopConnectorSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).stopConnectorSettings(); + } + + /** Returns the object with the settings used for calls to listLocations. */ + public PagedCallSettings + listLocationsSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).listLocationsSettings(); + } + + /** Returns the object with the settings used for calls to getLocation. */ + public UnaryCallSettings getLocationSettings() { + return ((ManagedKafkaConnectStubSettings) getStubSettings()).getLocationSettings(); + } + + public static final ManagedKafkaConnectSettings create(ManagedKafkaConnectStubSettings stub) + throws IOException { + return new ManagedKafkaConnectSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return ManagedKafkaConnectStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return ManagedKafkaConnectStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return ManagedKafkaConnectStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return ManagedKafkaConnectStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return ManagedKafkaConnectStubSettings.defaultGrpcTransportProviderBuilder(); + } + + /** Returns a builder for the default REST ChannelProvider for this service. */ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return ManagedKafkaConnectStubSettings.defaultHttpJsonTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return ManagedKafkaConnectStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ManagedKafkaConnectStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. 
*/ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected ManagedKafkaConnectSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for ManagedKafkaConnectSettings. */ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(ManagedKafkaConnectStubSettings.newBuilder(clientContext)); + } + + protected Builder(ManagedKafkaConnectSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(ManagedKafkaConnectStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(ManagedKafkaConnectStubSettings.newBuilder()); + } + + private static Builder createHttpJsonDefault() { + return new Builder(ManagedKafkaConnectStubSettings.newHttpJsonBuilder()); + } + + public ManagedKafkaConnectStubSettings.Builder getStubSettingsBuilder() { + return ((ManagedKafkaConnectStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to listConnectClusters. */ + public PagedCallSettings.Builder< + ListConnectClustersRequest, + ListConnectClustersResponse, + ListConnectClustersPagedResponse> + listConnectClustersSettings() { + return getStubSettingsBuilder().listConnectClustersSettings(); + } + + /** Returns the builder for the settings used for calls to getConnectCluster. */ + public UnaryCallSettings.Builder + getConnectClusterSettings() { + return getStubSettingsBuilder().getConnectClusterSettings(); + } + + /** Returns the builder for the settings used for calls to createConnectCluster. */ + public UnaryCallSettings.Builder + createConnectClusterSettings() { + return getStubSettingsBuilder().createConnectClusterSettings(); + } + + /** Returns the builder for the settings used for calls to createConnectCluster. */ + public OperationCallSettings.Builder< + CreateConnectClusterRequest, ConnectCluster, OperationMetadata> + createConnectClusterOperationSettings() { + return getStubSettingsBuilder().createConnectClusterOperationSettings(); + } + + /** Returns the builder for the settings used for calls to updateConnectCluster. */ + public UnaryCallSettings.Builder + updateConnectClusterSettings() { + return getStubSettingsBuilder().updateConnectClusterSettings(); + } + + /** Returns the builder for the settings used for calls to updateConnectCluster. */ + public OperationCallSettings.Builder< + UpdateConnectClusterRequest, ConnectCluster, OperationMetadata> + updateConnectClusterOperationSettings() { + return getStubSettingsBuilder().updateConnectClusterOperationSettings(); + } + + /** Returns the builder for the settings used for calls to deleteConnectCluster. */ + public UnaryCallSettings.Builder + deleteConnectClusterSettings() { + return getStubSettingsBuilder().deleteConnectClusterSettings(); + } + + /** Returns the builder for the settings used for calls to deleteConnectCluster. */ + public OperationCallSettings.Builder + deleteConnectClusterOperationSettings() { + return getStubSettingsBuilder().deleteConnectClusterOperationSettings(); + } + + /** Returns the builder for the settings used for calls to listConnectors. */ + public PagedCallSettings.Builder< + ListConnectorsRequest, ListConnectorsResponse, ListConnectorsPagedResponse> + listConnectorsSettings() { + return getStubSettingsBuilder().listConnectorsSettings(); + } + + /** Returns the builder for the settings used for calls to getConnector. */ + public UnaryCallSettings.Builder getConnectorSettings() { + return getStubSettingsBuilder().getConnectorSettings(); + } + + /** Returns the builder for the settings used for calls to createConnector. */ + public UnaryCallSettings.Builder createConnectorSettings() { + return getStubSettingsBuilder().createConnectorSettings(); + } + + /** Returns the builder for the settings used for calls to updateConnector. */ + public UnaryCallSettings.Builder updateConnectorSettings() { + return getStubSettingsBuilder().updateConnectorSettings(); + } + + /** Returns the builder for the settings used for calls to deleteConnector. 
*/ + public UnaryCallSettings.Builder deleteConnectorSettings() { + return getStubSettingsBuilder().deleteConnectorSettings(); + } + + /** Returns the builder for the settings used for calls to pauseConnector. */ + public UnaryCallSettings.Builder + pauseConnectorSettings() { + return getStubSettingsBuilder().pauseConnectorSettings(); + } + + /** Returns the builder for the settings used for calls to resumeConnector. */ + public UnaryCallSettings.Builder + resumeConnectorSettings() { + return getStubSettingsBuilder().resumeConnectorSettings(); + } + + /** Returns the builder for the settings used for calls to restartConnector. */ + public UnaryCallSettings.Builder + restartConnectorSettings() { + return getStubSettingsBuilder().restartConnectorSettings(); + } + + /** Returns the builder for the settings used for calls to stopConnector. */ + public UnaryCallSettings.Builder + stopConnectorSettings() { + return getStubSettingsBuilder().stopConnectorSettings(); + } + + /** Returns the builder for the settings used for calls to listLocations. */ + public PagedCallSettings.Builder< + ListLocationsRequest, ListLocationsResponse, ListLocationsPagedResponse> + listLocationsSettings() { + return getStubSettingsBuilder().listLocationsSettings(); + } + + /** Returns the builder for the settings used for calls to getLocation. */ + public UnaryCallSettings.Builder getLocationSettings() { + return getStubSettingsBuilder().getLocationSettings(); + } + + @Override + public ManagedKafkaConnectSettings build() throws IOException { + return new ManagedKafkaConnectSettings(this); + } + } +} diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/gapic_metadata.json b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/gapic_metadata.json index 3f85d676e519..792128c4e73c 100644 --- a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/gapic_metadata.json +++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/gapic_metadata.json @@ -61,6 +61,63 @@ } } } + }, + "ManagedKafkaConnect": { + "clients": { + "grpc": { + "libraryClient": "ManagedKafkaConnectClient", + "rpcs": { + "CreateConnectCluster": { + "methods": ["createConnectClusterAsync", "createConnectClusterAsync", "createConnectClusterAsync", "createConnectClusterOperationCallable", "createConnectClusterCallable"] + }, + "CreateConnector": { + "methods": ["createConnector", "createConnector", "createConnector", "createConnectorCallable"] + }, + "DeleteConnectCluster": { + "methods": ["deleteConnectClusterAsync", "deleteConnectClusterAsync", "deleteConnectClusterAsync", "deleteConnectClusterOperationCallable", "deleteConnectClusterCallable"] + }, + "DeleteConnector": { + "methods": ["deleteConnector", "deleteConnector", "deleteConnector", "deleteConnectorCallable"] + }, + "GetConnectCluster": { + "methods": ["getConnectCluster", "getConnectCluster", "getConnectCluster", "getConnectClusterCallable"] + }, + "GetConnector": { + "methods": ["getConnector", "getConnector", "getConnector", "getConnectorCallable"] + }, + "GetLocation": { + "methods": ["getLocation", "getLocationCallable"] + }, + "ListConnectClusters": { + "methods": ["listConnectClusters", "listConnectClusters", "listConnectClusters", "listConnectClustersPagedCallable", "listConnectClustersCallable"] + }, + "ListConnectors": { + "methods": ["listConnectors", "listConnectors", "listConnectors", "listConnectorsPagedCallable", 
"listConnectorsCallable"] + }, + "ListLocations": { + "methods": ["listLocations", "listLocationsPagedCallable", "listLocationsCallable"] + }, + "PauseConnector": { + "methods": ["pauseConnector", "pauseConnector", "pauseConnector", "pauseConnectorCallable"] + }, + "RestartConnector": { + "methods": ["restartConnector", "restartConnector", "restartConnector", "restartConnectorCallable"] + }, + "ResumeConnector": { + "methods": ["resumeConnector", "resumeConnector", "resumeConnector", "resumeConnectorCallable"] + }, + "StopConnector": { + "methods": ["stopConnector", "stopConnector", "stopConnector", "stopConnectorCallable"] + }, + "UpdateConnectCluster": { + "methods": ["updateConnectClusterAsync", "updateConnectClusterAsync", "updateConnectClusterOperationCallable", "updateConnectClusterCallable"] + }, + "UpdateConnector": { + "methods": ["updateConnector", "updateConnector", "updateConnectorCallable"] + } + } + } + } } } } \ No newline at end of file diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/package-info.java b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/package-info.java index 57c568541fb8..8a89707b2d9e 100644 --- a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/package-info.java +++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/package-info.java @@ -37,6 +37,26 @@ * Cluster response = managedKafkaClient.getCluster(name); * } * } + * + *

+ * <p>======================= ManagedKafkaConnectClient =======================
+ *
+ * <p>Service Description: The service that a client application uses to manage Apache Kafka Connect
+ * clusters and connectors.
+ *
+ * <p>Sample for ManagedKafkaConnectClient:
+ *
+ * <pre>{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+ *   ConnectClusterName name =
+ *       ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+ *   ConnectCluster response = managedKafkaConnectClient.getConnectCluster(name);
+ * }
+ * }</pre>
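+ *
+ * <p>Editorial sketch, not generator output: as the generated comments note, a regional endpoint
+ * may need to be set explicitly; the endpoint string below is a hypothetical example:
+ *
+ * <pre>{@code
+ * ManagedKafkaConnectSettings managedKafkaConnectSettings =
+ *     ManagedKafkaConnectSettings.newBuilder()
+ *         .setEndpoint("us-central1-managedkafka.googleapis.com:443") // hypothetical endpoint
+ *         .build();
+ * ManagedKafkaConnectClient managedKafkaConnectClient =
+ *     ManagedKafkaConnectClient.create(managedKafkaConnectSettings);
+ * }</pre>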
*/ @Generated("by gapic-generator-java") package com.google.cloud.managedkafka.v1; diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/GrpcManagedKafkaConnectCallableFactory.java b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/GrpcManagedKafkaConnectCallableFactory.java new file mode 100644 index 000000000000..5255c68af3b8 --- /dev/null +++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/GrpcManagedKafkaConnectCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the ManagedKafkaConnect service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcManagedKafkaConnectCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/GrpcManagedKafkaConnectStub.java b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/GrpcManagedKafkaConnectStub.java new file mode 100644 index 000000000000..422e542c098f --- /dev/null +++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/GrpcManagedKafkaConnectStub.java @@ -0,0 +1,738 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.managedkafka.v1.stub; + +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectClustersPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectorsPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListLocationsPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.location.GetLocationRequest; +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.ListLocationsResponse; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.CreateConnectClusterRequest; +import com.google.cloud.managedkafka.v1.CreateConnectorRequest; +import com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest; +import com.google.cloud.managedkafka.v1.DeleteConnectorRequest; +import com.google.cloud.managedkafka.v1.GetConnectClusterRequest; +import com.google.cloud.managedkafka.v1.GetConnectorRequest; +import com.google.cloud.managedkafka.v1.ListConnectClustersRequest; +import com.google.cloud.managedkafka.v1.ListConnectClustersResponse; +import com.google.cloud.managedkafka.v1.ListConnectorsRequest; +import com.google.cloud.managedkafka.v1.ListConnectorsResponse; +import com.google.cloud.managedkafka.v1.OperationMetadata; +import com.google.cloud.managedkafka.v1.PauseConnectorRequest; +import com.google.cloud.managedkafka.v1.PauseConnectorResponse; +import com.google.cloud.managedkafka.v1.RestartConnectorRequest; +import com.google.cloud.managedkafka.v1.RestartConnectorResponse; +import com.google.cloud.managedkafka.v1.ResumeConnectorRequest; +import com.google.cloud.managedkafka.v1.ResumeConnectorResponse; +import com.google.cloud.managedkafka.v1.StopConnectorRequest; +import com.google.cloud.managedkafka.v1.StopConnectorResponse; +import com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest; +import com.google.cloud.managedkafka.v1.UpdateConnectorRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the ManagedKafkaConnect service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcManagedKafkaConnectStub extends ManagedKafkaConnectStub { + private static final MethodDescriptor + listConnectClustersMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/ListConnectClusters") + .setRequestMarshaller( + ProtoUtils.marshaller(ListConnectClustersRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListConnectClustersResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + getConnectClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/GetConnectCluster") + .setRequestMarshaller( + ProtoUtils.marshaller(GetConnectClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ConnectCluster.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + createConnectClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/CreateConnectCluster") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateConnectClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + updateConnectClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/UpdateConnectCluster") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateConnectClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + deleteConnectClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/DeleteConnectCluster") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteConnectClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + listConnectorsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/ListConnectors") + .setRequestMarshaller( + ProtoUtils.marshaller(ListConnectorsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListConnectorsResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + getConnectorMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/GetConnector") + .setRequestMarshaller(ProtoUtils.marshaller(GetConnectorRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Connector.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + createConnectorMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + 
.setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/CreateConnector") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateConnectorRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Connector.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + updateConnectorMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/UpdateConnector") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateConnectorRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Connector.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + deleteConnectorMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/DeleteConnector") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteConnectorRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + pauseConnectorMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/PauseConnector") + .setRequestMarshaller( + ProtoUtils.marshaller(PauseConnectorRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(PauseConnectorResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + resumeConnectorMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/ResumeConnector") + .setRequestMarshaller( + ProtoUtils.marshaller(ResumeConnectorRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ResumeConnectorResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + restartConnectorMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/RestartConnector") + .setRequestMarshaller( + ProtoUtils.marshaller(RestartConnectorRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(RestartConnectorResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + stopConnectorMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/StopConnector") + .setRequestMarshaller( + ProtoUtils.marshaller(StopConnectorRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(StopConnectorResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + listLocationsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.location.Locations/ListLocations") + .setRequestMarshaller( + ProtoUtils.marshaller(ListLocationsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListLocationsResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor getLocationMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + 
.setFullMethodName("google.cloud.location.Locations/GetLocation") + .setRequestMarshaller(ProtoUtils.marshaller(GetLocationRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Location.getDefaultInstance())) + .build(); + + private final UnaryCallable + listConnectClustersCallable; + private final UnaryCallable + listConnectClustersPagedCallable; + private final UnaryCallable getConnectClusterCallable; + private final UnaryCallable createConnectClusterCallable; + private final OperationCallable + createConnectClusterOperationCallable; + private final UnaryCallable updateConnectClusterCallable; + private final OperationCallable + updateConnectClusterOperationCallable; + private final UnaryCallable deleteConnectClusterCallable; + private final OperationCallable + deleteConnectClusterOperationCallable; + private final UnaryCallable listConnectorsCallable; + private final UnaryCallable + listConnectorsPagedCallable; + private final UnaryCallable getConnectorCallable; + private final UnaryCallable createConnectorCallable; + private final UnaryCallable updateConnectorCallable; + private final UnaryCallable deleteConnectorCallable; + private final UnaryCallable pauseConnectorCallable; + private final UnaryCallable + resumeConnectorCallable; + private final UnaryCallable + restartConnectorCallable; + private final UnaryCallable stopConnectorCallable; + private final UnaryCallable listLocationsCallable; + private final UnaryCallable + listLocationsPagedCallable; + private final UnaryCallable getLocationCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcManagedKafkaConnectStub create(ManagedKafkaConnectStubSettings settings) + throws IOException { + return new GrpcManagedKafkaConnectStub(settings, ClientContext.create(settings)); + } + + public static final GrpcManagedKafkaConnectStub create(ClientContext clientContext) + throws IOException { + return new GrpcManagedKafkaConnectStub( + ManagedKafkaConnectStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcManagedKafkaConnectStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcManagedKafkaConnectStub( + ManagedKafkaConnectStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcManagedKafkaConnectStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcManagedKafkaConnectStub( + ManagedKafkaConnectStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new GrpcManagedKafkaConnectCallableFactory()); + } + + /** + * Constructs an instance of GrpcManagedKafkaConnectStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. 
+ */ + protected GrpcManagedKafkaConnectStub( + ManagedKafkaConnectStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings + listConnectClustersTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listConnectClustersMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings getConnectClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getConnectClusterMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings createConnectClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createConnectClusterMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings updateConnectClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateConnectClusterMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "connect_cluster.name", + String.valueOf(request.getConnectCluster().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings deleteConnectClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteConnectClusterMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + listConnectorsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listConnectorsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings getConnectorTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getConnectorMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings createConnectorTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createConnectorMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings updateConnectorTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateConnectorMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("connector.name", String.valueOf(request.getConnector().getName())); + return builder.build(); + }) + .build(); + 
GrpcCallSettings deleteConnectorTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteConnectorMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + pauseConnectorTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(pauseConnectorMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + resumeConnectorTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(resumeConnectorMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + restartConnectorTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(restartConnectorMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings stopConnectorTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(stopConnectorMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings listLocationsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listLocationsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings getLocationTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getLocationMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + + this.listConnectClustersCallable = + callableFactory.createUnaryCallable( + listConnectClustersTransportSettings, + settings.listConnectClustersSettings(), + clientContext); + this.listConnectClustersPagedCallable = + callableFactory.createPagedCallable( + listConnectClustersTransportSettings, + settings.listConnectClustersSettings(), + clientContext); + this.getConnectClusterCallable = + callableFactory.createUnaryCallable( + getConnectClusterTransportSettings, + settings.getConnectClusterSettings(), + clientContext); + this.createConnectClusterCallable = + callableFactory.createUnaryCallable( + createConnectClusterTransportSettings, + settings.createConnectClusterSettings(), + clientContext); + this.createConnectClusterOperationCallable = + callableFactory.createOperationCallable( + createConnectClusterTransportSettings, + settings.createConnectClusterOperationSettings(), + clientContext, + operationsStub); + this.updateConnectClusterCallable = + callableFactory.createUnaryCallable( + updateConnectClusterTransportSettings, + settings.updateConnectClusterSettings(), + clientContext); + this.updateConnectClusterOperationCallable 
= + callableFactory.createOperationCallable( + updateConnectClusterTransportSettings, + settings.updateConnectClusterOperationSettings(), + clientContext, + operationsStub); + this.deleteConnectClusterCallable = + callableFactory.createUnaryCallable( + deleteConnectClusterTransportSettings, + settings.deleteConnectClusterSettings(), + clientContext); + this.deleteConnectClusterOperationCallable = + callableFactory.createOperationCallable( + deleteConnectClusterTransportSettings, + settings.deleteConnectClusterOperationSettings(), + clientContext, + operationsStub); + this.listConnectorsCallable = + callableFactory.createUnaryCallable( + listConnectorsTransportSettings, settings.listConnectorsSettings(), clientContext); + this.listConnectorsPagedCallable = + callableFactory.createPagedCallable( + listConnectorsTransportSettings, settings.listConnectorsSettings(), clientContext); + this.getConnectorCallable = + callableFactory.createUnaryCallable( + getConnectorTransportSettings, settings.getConnectorSettings(), clientContext); + this.createConnectorCallable = + callableFactory.createUnaryCallable( + createConnectorTransportSettings, settings.createConnectorSettings(), clientContext); + this.updateConnectorCallable = + callableFactory.createUnaryCallable( + updateConnectorTransportSettings, settings.updateConnectorSettings(), clientContext); + this.deleteConnectorCallable = + callableFactory.createUnaryCallable( + deleteConnectorTransportSettings, settings.deleteConnectorSettings(), clientContext); + this.pauseConnectorCallable = + callableFactory.createUnaryCallable( + pauseConnectorTransportSettings, settings.pauseConnectorSettings(), clientContext); + this.resumeConnectorCallable = + callableFactory.createUnaryCallable( + resumeConnectorTransportSettings, settings.resumeConnectorSettings(), clientContext); + this.restartConnectorCallable = + callableFactory.createUnaryCallable( + restartConnectorTransportSettings, settings.restartConnectorSettings(), clientContext); + this.stopConnectorCallable = + callableFactory.createUnaryCallable( + stopConnectorTransportSettings, settings.stopConnectorSettings(), clientContext); + this.listLocationsCallable = + callableFactory.createUnaryCallable( + listLocationsTransportSettings, settings.listLocationsSettings(), clientContext); + this.listLocationsPagedCallable = + callableFactory.createPagedCallable( + listLocationsTransportSettings, settings.listLocationsSettings(), clientContext); + this.getLocationCallable = + callableFactory.createUnaryCallable( + getLocationTransportSettings, settings.getLocationSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable + listConnectClustersCallable() { + return listConnectClustersCallable; + } + + @Override + public UnaryCallable + listConnectClustersPagedCallable() { + return listConnectClustersPagedCallable; + } + + @Override + public UnaryCallable getConnectClusterCallable() { + return getConnectClusterCallable; + } + + @Override + public UnaryCallable createConnectClusterCallable() { + return createConnectClusterCallable; + } + + @Override + public OperationCallable + createConnectClusterOperationCallable() { + return createConnectClusterOperationCallable; + } + + @Override + public UnaryCallable updateConnectClusterCallable() { + return updateConnectClusterCallable; + } + + @Override + public 
OperationCallable + updateConnectClusterOperationCallable() { + return updateConnectClusterOperationCallable; + } + + @Override + public UnaryCallable deleteConnectClusterCallable() { + return deleteConnectClusterCallable; + } + + @Override + public OperationCallable + deleteConnectClusterOperationCallable() { + return deleteConnectClusterOperationCallable; + } + + @Override + public UnaryCallable listConnectorsCallable() { + return listConnectorsCallable; + } + + @Override + public UnaryCallable + listConnectorsPagedCallable() { + return listConnectorsPagedCallable; + } + + @Override + public UnaryCallable getConnectorCallable() { + return getConnectorCallable; + } + + @Override + public UnaryCallable createConnectorCallable() { + return createConnectorCallable; + } + + @Override + public UnaryCallable updateConnectorCallable() { + return updateConnectorCallable; + } + + @Override + public UnaryCallable deleteConnectorCallable() { + return deleteConnectorCallable; + } + + @Override + public UnaryCallable pauseConnectorCallable() { + return pauseConnectorCallable; + } + + @Override + public UnaryCallable resumeConnectorCallable() { + return resumeConnectorCallable; + } + + @Override + public UnaryCallable + restartConnectorCallable() { + return restartConnectorCallable; + } + + @Override + public UnaryCallable stopConnectorCallable() { + return stopConnectorCallable; + } + + @Override + public UnaryCallable listLocationsCallable() { + return listLocationsCallable; + } + + @Override + public UnaryCallable + listLocationsPagedCallable() { + return listLocationsPagedCallable; + } + + @Override + public UnaryCallable getLocationCallable() { + return getLocationCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/HttpJsonManagedKafkaConnectCallableFactory.java b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/HttpJsonManagedKafkaConnectCallableFactory.java new file mode 100644 index 000000000000..7ba115146fba --- /dev/null +++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/HttpJsonManagedKafkaConnectCallableFactory.java @@ -0,0 +1,101 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
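For context, a minimal sketch of how these wired callables surface to users, assuming the generated `ManagedKafkaConnectClient` and `ConnectorName` helper follow the standard GAPIC surface; the project, location, cluster, and connector IDs are placeholders:

```Java
import com.google.cloud.managedkafka.v1.ConnectorName;
import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
import com.google.cloud.managedkafka.v1.PauseConnectorRequest;
import com.google.cloud.managedkafka.v1.PauseConnectorResponse;

public class PauseConnectorSketch {
  public static void main(String[] args) throws Exception {
    // try-with-resources invokes close(), which releases the stub's aggregated
    // background resources (transport channels and executors).
    try (ManagedKafkaConnectClient client = ManagedKafkaConnectClient.create()) {
      PauseConnectorRequest request =
          PauseConnectorRequest.newBuilder()
              .setName(
                  ConnectorName.of(
                          "my-project", "us-central1", "my-connect-cluster", "my-connector")
                      .toString())
              .build();
      // Routed through pauseConnectorCallable; the "name" field is also sent as a
      // request routing header, as configured by the params extractor above.
      PauseConnectorResponse response = client.pauseConnector(request);
      System.out.println("Paused connector: " + request.getName());
    }
  }
}
```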
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.stub; + +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonCallableFactory; +import com.google.api.gax.httpjson.HttpJsonOperationSnapshotCallable; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.longrunning.stub.OperationsStub; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST callable factory implementation for the ManagedKafkaConnect service API. + * + *
<p>
This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class HttpJsonManagedKafkaConnectCallableFactory + implements HttpJsonStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + HttpJsonCallSettings httpJsonCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createUnaryCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + HttpJsonCallSettings httpJsonCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createPagedCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + HttpJsonCallSettings httpJsonCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createBatchingCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + HttpJsonCallSettings httpJsonCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + UnaryCallable innerCallable = + HttpJsonCallableFactory.createBaseUnaryCallable( + httpJsonCallSettings, callSettings.getInitialCallSettings(), clientContext); + HttpJsonOperationSnapshotCallable initialCallable = + new HttpJsonOperationSnapshotCallable( + innerCallable, + httpJsonCallSettings.getMethodDescriptor().getOperationSnapshotFactory()); + return HttpJsonCallableFactory.createOperationCallable( + callSettings, clientContext, operationsStub.longRunningClient(), initialCallable); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + HttpJsonCallSettings httpJsonCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createServerStreamingCallable( + httpJsonCallSettings, callSettings, clientContext); + } +} diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/HttpJsonManagedKafkaConnectStub.java b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/HttpJsonManagedKafkaConnectStub.java new file mode 100644 index 000000000000..43b291e47a56 --- /dev/null +++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/HttpJsonManagedKafkaConnectStub.java @@ -0,0 +1,1261 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
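The operation-snapshot wrapping in the callable factory is what lets long-running methods behave uniformly across gRPC and REST. From the caller's side this surfaces as an `OperationFuture`; a sketch, assuming the standard generated `createConnectClusterAsync` surface, with placeholder resource IDs:

```Java
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.managedkafka.v1.ConnectCluster;
import com.google.cloud.managedkafka.v1.CreateConnectClusterRequest;
import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
import com.google.cloud.managedkafka.v1.OperationMetadata;

public class CreateConnectClusterLroSketch {
  public static void main(String[] args) throws Exception {
    try (ManagedKafkaConnectClient client = ManagedKafkaConnectClient.create()) {
      CreateConnectClusterRequest request =
          CreateConnectClusterRequest.newBuilder()
              .setParent("projects/my-project/locations/us-central1")
              .setConnectClusterId("my-connect-cluster")
              // A real request must populate the cluster's required config; elided here.
              .setConnectCluster(ConnectCluster.newBuilder().build())
              .build();
      // The returned future polls the LRO through the operations stub wired above
      // until the ConnectCluster resource is available (or the operation fails).
      OperationFuture<ConnectCluster, OperationMetadata> future =
          client.createConnectClusterAsync(request);
      ConnectCluster cluster = future.get();
      System.out.println("Created: " + cluster.getName());
    }
  }
}
```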
+ */ + +package com.google.cloud.managedkafka.v1.stub; + +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectClustersPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectorsPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListLocationsPagedResponse; + +import com.google.api.HttpRule; +import com.google.api.core.InternalApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.httpjson.ApiMethodDescriptor; +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonOperationSnapshot; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.ProtoMessageRequestFormatter; +import com.google.api.gax.httpjson.ProtoMessageResponseParser; +import com.google.api.gax.httpjson.ProtoRestSerializer; +import com.google.api.gax.httpjson.longrunning.stub.HttpJsonOperationsStub; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.location.GetLocationRequest; +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.ListLocationsResponse; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.CreateConnectClusterRequest; +import com.google.cloud.managedkafka.v1.CreateConnectorRequest; +import com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest; +import com.google.cloud.managedkafka.v1.DeleteConnectorRequest; +import com.google.cloud.managedkafka.v1.GetConnectClusterRequest; +import com.google.cloud.managedkafka.v1.GetConnectorRequest; +import com.google.cloud.managedkafka.v1.ListConnectClustersRequest; +import com.google.cloud.managedkafka.v1.ListConnectClustersResponse; +import com.google.cloud.managedkafka.v1.ListConnectorsRequest; +import com.google.cloud.managedkafka.v1.ListConnectorsResponse; +import com.google.cloud.managedkafka.v1.OperationMetadata; +import com.google.cloud.managedkafka.v1.PauseConnectorRequest; +import com.google.cloud.managedkafka.v1.PauseConnectorResponse; +import com.google.cloud.managedkafka.v1.RestartConnectorRequest; +import com.google.cloud.managedkafka.v1.RestartConnectorResponse; +import com.google.cloud.managedkafka.v1.ResumeConnectorRequest; +import com.google.cloud.managedkafka.v1.ResumeConnectorResponse; +import com.google.cloud.managedkafka.v1.StopConnectorRequest; +import com.google.cloud.managedkafka.v1.StopConnectorResponse; +import com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest; +import com.google.cloud.managedkafka.v1.UpdateConnectorRequest; +import com.google.common.collect.ImmutableMap; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.protobuf.TypeRegistry; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST stub implementation for the ManagedKafkaConnect service API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class HttpJsonManagedKafkaConnectStub extends ManagedKafkaConnectStub { + private static final TypeRegistry typeRegistry = + TypeRegistry.newBuilder() + .add(Empty.getDescriptor()) + .add(ConnectCluster.getDescriptor()) + .add(OperationMetadata.getDescriptor()) + .build(); + + private static final ApiMethodDescriptor + listConnectClustersMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/ListConnectClusters") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/locations/*}/connectClusters", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "filter", request.getFilter()); + serializer.putQueryParam(fields, "orderBy", request.getOrderBy()); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListConnectClustersResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getConnectClusterMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/GetConnectCluster") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/locations/*/connectClusters/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ConnectCluster.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createConnectClusterMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/CreateConnectCluster") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/locations/*}/connectClusters", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + 
.setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam( + fields, "connectClusterId", request.getConnectClusterId()); + serializer.putQueryParam(fields, "requestId", request.getRequestId()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("connectCluster", request.getConnectCluster(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (CreateConnectClusterRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + updateConnectClusterMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/UpdateConnectCluster") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{connectCluster.name=projects/*/locations/*/connectClusters/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, + "connectCluster.name", + request.getConnectCluster().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "requestId", request.getRequestId()); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("connectCluster", request.getConnectCluster(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (UpdateConnectClusterRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + deleteConnectClusterMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/DeleteConnectCluster") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/locations/*/connectClusters/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "requestId", request.getRequestId()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + 
.setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (DeleteConnectClusterRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + listConnectorsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/ListConnectors") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/locations/*/connectClusters/*}/connectors", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListConnectorsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getConnectorMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/GetConnector") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Connector.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createConnectorMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/CreateConnector") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/locations/*/connectClusters/*}/connectors", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam( + fields, "connectorId", request.getConnectorId()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + 
ProtoRestSerializer.create() + .toBody("connector", request.getConnector(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Connector.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + updateConnectorMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/UpdateConnector") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{connector.name=projects/*/locations/*/connectClusters/*/connectors/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, "connector.name", request.getConnector().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("connector", request.getConnector(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Connector.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + deleteConnectorMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/DeleteConnector") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + pauseConnectorMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/PauseConnector") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}:pause", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + 
.setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearName().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(PauseConnectorResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + resumeConnectorMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/ResumeConnector") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}:resume", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearName().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ResumeConnectorResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + restartConnectorMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.cloud.managedkafka.v1.ManagedKafkaConnect/RestartConnector") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}:restart", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearName().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(RestartConnectorResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + stopConnectorMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.managedkafka.v1.ManagedKafkaConnect/StopConnector") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}:stop", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + 
serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearName().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(StopConnectorResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listLocationsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.location.Locations/ListLocations") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*}/locations", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListLocationsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getLocationMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.cloud.location.Locations/GetLocation") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/locations/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Location.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private final UnaryCallable + listConnectClustersCallable; + private final UnaryCallable + listConnectClustersPagedCallable; + private final UnaryCallable getConnectClusterCallable; + private final UnaryCallable createConnectClusterCallable; + private final OperationCallable + createConnectClusterOperationCallable; + private final UnaryCallable updateConnectClusterCallable; + private final OperationCallable + updateConnectClusterOperationCallable; + private final UnaryCallable deleteConnectClusterCallable; + private final OperationCallable + deleteConnectClusterOperationCallable; + private final UnaryCallable listConnectorsCallable; + private final UnaryCallable + listConnectorsPagedCallable; + private final UnaryCallable getConnectorCallable; + private final UnaryCallable createConnectorCallable; + private final UnaryCallable updateConnectorCallable; + private final UnaryCallable deleteConnectorCallable; + private final UnaryCallable pauseConnectorCallable; + private final UnaryCallable + 
resumeConnectorCallable; + private final UnaryCallable + restartConnectorCallable; + private final UnaryCallable stopConnectorCallable; + private final UnaryCallable listLocationsCallable; + private final UnaryCallable + listLocationsPagedCallable; + private final UnaryCallable getLocationCallable; + + private final BackgroundResource backgroundResources; + private final HttpJsonOperationsStub httpJsonOperationsStub; + private final HttpJsonStubCallableFactory callableFactory; + + public static final HttpJsonManagedKafkaConnectStub create( + ManagedKafkaConnectStubSettings settings) throws IOException { + return new HttpJsonManagedKafkaConnectStub(settings, ClientContext.create(settings)); + } + + public static final HttpJsonManagedKafkaConnectStub create(ClientContext clientContext) + throws IOException { + return new HttpJsonManagedKafkaConnectStub( + ManagedKafkaConnectStubSettings.newHttpJsonBuilder().build(), clientContext); + } + + public static final HttpJsonManagedKafkaConnectStub create( + ClientContext clientContext, HttpJsonStubCallableFactory callableFactory) throws IOException { + return new HttpJsonManagedKafkaConnectStub( + ManagedKafkaConnectStubSettings.newHttpJsonBuilder().build(), + clientContext, + callableFactory); + } + + /** + * Constructs an instance of HttpJsonManagedKafkaConnectStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected HttpJsonManagedKafkaConnectStub( + ManagedKafkaConnectStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new HttpJsonManagedKafkaConnectCallableFactory()); + } + + /** + * Constructs an instance of HttpJsonManagedKafkaConnectStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. 
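As the Javadoc notes, the static factories are the intended entry points; the protected constructors exist only to make subclassing easy. A sketch of direct stub creation via those factories (advanced usage; most callers should go through `ManagedKafkaConnectClient`, which owns the stub lifecycle):

```Java
import com.google.cloud.managedkafka.v1.stub.HttpJsonManagedKafkaConnectStub;
import com.google.cloud.managedkafka.v1.stub.ManagedKafkaConnectStubSettings;
import java.io.IOException;

public class DirectStubSketch {
  public static void main(String[] args) throws IOException {
    ManagedKafkaConnectStubSettings settings =
        ManagedKafkaConnectStubSettings.newHttpJsonBuilder().build();
    HttpJsonManagedKafkaConnectStub stub = HttpJsonManagedKafkaConnectStub.create(settings);
    try {
      // Callables are invoked directly at this level, with no client-side
      // conveniences, e.g.:
      // stub.getConnectorCallable().call(GetConnectorRequest.newBuilder()...build());
    } finally {
      // The stub owns its background resources when created this way.
      stub.close();
    }
  }
}
```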
+ */ + protected HttpJsonManagedKafkaConnectStub( + ManagedKafkaConnectStubSettings settings, + ClientContext clientContext, + HttpJsonStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.httpJsonOperationsStub = + HttpJsonOperationsStub.create( + clientContext, + callableFactory, + typeRegistry, + ImmutableMap.builder() + .put( + "google.longrunning.Operations.CancelOperation", + HttpRule.newBuilder() + .setPost("/v1/{name=projects/*/locations/*/operations/*}:cancel") + .build()) + .put( + "google.longrunning.Operations.DeleteOperation", + HttpRule.newBuilder() + .setDelete("/v1/{name=projects/*/locations/*/operations/*}") + .build()) + .put( + "google.longrunning.Operations.GetOperation", + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/locations/*/operations/*}") + .build()) + .put( + "google.longrunning.Operations.ListOperations", + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/locations/*}/operations") + .build()) + .build()); + + HttpJsonCallSettings + listConnectClustersTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(listConnectClustersMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + getConnectClusterTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getConnectClusterMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + createConnectClusterTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createConnectClusterMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + updateConnectClusterTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateConnectClusterMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "connect_cluster.name", + String.valueOf(request.getConnectCluster().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + deleteConnectClusterTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteConnectClusterMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + listConnectorsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listConnectorsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings getConnectorTransportSettings = + HttpJsonCallSettings.newBuilder() + 
.setMethodDescriptor(getConnectorMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings createConnectorTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createConnectorMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings updateConnectorTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateConnectorMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("connector.name", String.valueOf(request.getConnector().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings deleteConnectorTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteConnectorMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + pauseConnectorTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(pauseConnectorMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + resumeConnectorTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(resumeConnectorMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + restartConnectorTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(restartConnectorMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + stopConnectorTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(stopConnectorMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + listLocationsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listLocationsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings getLocationTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getLocationMethodDescriptor) + 
.setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + + this.listConnectClustersCallable = + callableFactory.createUnaryCallable( + listConnectClustersTransportSettings, + settings.listConnectClustersSettings(), + clientContext); + this.listConnectClustersPagedCallable = + callableFactory.createPagedCallable( + listConnectClustersTransportSettings, + settings.listConnectClustersSettings(), + clientContext); + this.getConnectClusterCallable = + callableFactory.createUnaryCallable( + getConnectClusterTransportSettings, + settings.getConnectClusterSettings(), + clientContext); + this.createConnectClusterCallable = + callableFactory.createUnaryCallable( + createConnectClusterTransportSettings, + settings.createConnectClusterSettings(), + clientContext); + this.createConnectClusterOperationCallable = + callableFactory.createOperationCallable( + createConnectClusterTransportSettings, + settings.createConnectClusterOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.updateConnectClusterCallable = + callableFactory.createUnaryCallable( + updateConnectClusterTransportSettings, + settings.updateConnectClusterSettings(), + clientContext); + this.updateConnectClusterOperationCallable = + callableFactory.createOperationCallable( + updateConnectClusterTransportSettings, + settings.updateConnectClusterOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.deleteConnectClusterCallable = + callableFactory.createUnaryCallable( + deleteConnectClusterTransportSettings, + settings.deleteConnectClusterSettings(), + clientContext); + this.deleteConnectClusterOperationCallable = + callableFactory.createOperationCallable( + deleteConnectClusterTransportSettings, + settings.deleteConnectClusterOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.listConnectorsCallable = + callableFactory.createUnaryCallable( + listConnectorsTransportSettings, settings.listConnectorsSettings(), clientContext); + this.listConnectorsPagedCallable = + callableFactory.createPagedCallable( + listConnectorsTransportSettings, settings.listConnectorsSettings(), clientContext); + this.getConnectorCallable = + callableFactory.createUnaryCallable( + getConnectorTransportSettings, settings.getConnectorSettings(), clientContext); + this.createConnectorCallable = + callableFactory.createUnaryCallable( + createConnectorTransportSettings, settings.createConnectorSettings(), clientContext); + this.updateConnectorCallable = + callableFactory.createUnaryCallable( + updateConnectorTransportSettings, settings.updateConnectorSettings(), clientContext); + this.deleteConnectorCallable = + callableFactory.createUnaryCallable( + deleteConnectorTransportSettings, settings.deleteConnectorSettings(), clientContext); + this.pauseConnectorCallable = + callableFactory.createUnaryCallable( + pauseConnectorTransportSettings, settings.pauseConnectorSettings(), clientContext); + this.resumeConnectorCallable = + callableFactory.createUnaryCallable( + resumeConnectorTransportSettings, settings.resumeConnectorSettings(), clientContext); + this.restartConnectorCallable = + callableFactory.createUnaryCallable( + restartConnectorTransportSettings, settings.restartConnectorSettings(), clientContext); + this.stopConnectorCallable = + callableFactory.createUnaryCallable( + stopConnectorTransportSettings, settings.stopConnectorSettings(), 
clientContext); + this.listLocationsCallable = + callableFactory.createUnaryCallable( + listLocationsTransportSettings, settings.listLocationsSettings(), clientContext); + this.listLocationsPagedCallable = + callableFactory.createPagedCallable( + listLocationsTransportSettings, settings.listLocationsSettings(), clientContext); + this.getLocationCallable = + callableFactory.createUnaryCallable( + getLocationTransportSettings, settings.getLocationSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + @InternalApi + public static List getMethodDescriptors() { + List methodDescriptors = new ArrayList<>(); + methodDescriptors.add(listConnectClustersMethodDescriptor); + methodDescriptors.add(getConnectClusterMethodDescriptor); + methodDescriptors.add(createConnectClusterMethodDescriptor); + methodDescriptors.add(updateConnectClusterMethodDescriptor); + methodDescriptors.add(deleteConnectClusterMethodDescriptor); + methodDescriptors.add(listConnectorsMethodDescriptor); + methodDescriptors.add(getConnectorMethodDescriptor); + methodDescriptors.add(createConnectorMethodDescriptor); + methodDescriptors.add(updateConnectorMethodDescriptor); + methodDescriptors.add(deleteConnectorMethodDescriptor); + methodDescriptors.add(pauseConnectorMethodDescriptor); + methodDescriptors.add(resumeConnectorMethodDescriptor); + methodDescriptors.add(restartConnectorMethodDescriptor); + methodDescriptors.add(stopConnectorMethodDescriptor); + methodDescriptors.add(listLocationsMethodDescriptor); + methodDescriptors.add(getLocationMethodDescriptor); + return methodDescriptors; + } + + public HttpJsonOperationsStub getHttpJsonOperationsStub() { + return httpJsonOperationsStub; + } + + @Override + public UnaryCallable + listConnectClustersCallable() { + return listConnectClustersCallable; + } + + @Override + public UnaryCallable + listConnectClustersPagedCallable() { + return listConnectClustersPagedCallable; + } + + @Override + public UnaryCallable getConnectClusterCallable() { + return getConnectClusterCallable; + } + + @Override + public UnaryCallable createConnectClusterCallable() { + return createConnectClusterCallable; + } + + @Override + public OperationCallable + createConnectClusterOperationCallable() { + return createConnectClusterOperationCallable; + } + + @Override + public UnaryCallable updateConnectClusterCallable() { + return updateConnectClusterCallable; + } + + @Override + public OperationCallable + updateConnectClusterOperationCallable() { + return updateConnectClusterOperationCallable; + } + + @Override + public UnaryCallable deleteConnectClusterCallable() { + return deleteConnectClusterCallable; + } + + @Override + public OperationCallable + deleteConnectClusterOperationCallable() { + return deleteConnectClusterOperationCallable; + } + + @Override + public UnaryCallable listConnectorsCallable() { + return listConnectorsCallable; + } + + @Override + public UnaryCallable + listConnectorsPagedCallable() { + return listConnectorsPagedCallable; + } + + @Override + public UnaryCallable getConnectorCallable() { + return getConnectorCallable; + } + + @Override + public UnaryCallable createConnectorCallable() { + return createConnectorCallable; + } + + @Override + public UnaryCallable updateConnectorCallable() { + return updateConnectorCallable; + } + + @Override + public UnaryCallable deleteConnectorCallable() { + return deleteConnectorCallable; + } + + @Override + public UnaryCallable pauseConnectorCallable() { + return 
pauseConnectorCallable; + } + + @Override + public UnaryCallable resumeConnectorCallable() { + return resumeConnectorCallable; + } + + @Override + public UnaryCallable + restartConnectorCallable() { + return restartConnectorCallable; + } + + @Override + public UnaryCallable stopConnectorCallable() { + return stopConnectorCallable; + } + + @Override + public UnaryCallable listLocationsCallable() { + return listLocationsCallable; + } + + @Override + public UnaryCallable + listLocationsPagedCallable() { + return listLocationsPagedCallable; + } + + @Override + public UnaryCallable getLocationCallable() { + return getLocationCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/ManagedKafkaConnectStub.java b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/ManagedKafkaConnectStub.java new file mode 100644 index 000000000000..3c3b56a895fb --- /dev/null +++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/ManagedKafkaConnectStub.java @@ -0,0 +1,176 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
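With both transports generated, REST can be selected when the settings are built; a sketch assuming the public `ManagedKafkaConnectSettings` wrapper mirrors the stub settings' `newHttpJsonBuilder()` (the standard pattern for generated clients), with a placeholder parent:

```Java
import com.google.cloud.managedkafka.v1.ConnectCluster;
import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
import com.google.cloud.managedkafka.v1.ManagedKafkaConnectSettings;

public class RestTransportSketch {
  public static void main(String[] args) throws Exception {
    // gRPC is the default transport; this opts the client into the
    // HTTP/JSON stub defined above.
    ManagedKafkaConnectSettings settings =
        ManagedKafkaConnectSettings.newHttpJsonBuilder().build();
    try (ManagedKafkaConnectClient client = ManagedKafkaConnectClient.create(settings)) {
      for (ConnectCluster cluster :
          client
              .listConnectClusters("projects/my-project/locations/us-central1")
              .iterateAll()) {
        System.out.println(cluster.getName());
      }
    }
  }
}
```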
+ */ + +package com.google.cloud.managedkafka.v1.stub; + +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectClustersPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectorsPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListLocationsPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.location.GetLocationRequest; +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.ListLocationsResponse; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.CreateConnectClusterRequest; +import com.google.cloud.managedkafka.v1.CreateConnectorRequest; +import com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest; +import com.google.cloud.managedkafka.v1.DeleteConnectorRequest; +import com.google.cloud.managedkafka.v1.GetConnectClusterRequest; +import com.google.cloud.managedkafka.v1.GetConnectorRequest; +import com.google.cloud.managedkafka.v1.ListConnectClustersRequest; +import com.google.cloud.managedkafka.v1.ListConnectClustersResponse; +import com.google.cloud.managedkafka.v1.ListConnectorsRequest; +import com.google.cloud.managedkafka.v1.ListConnectorsResponse; +import com.google.cloud.managedkafka.v1.OperationMetadata; +import com.google.cloud.managedkafka.v1.PauseConnectorRequest; +import com.google.cloud.managedkafka.v1.PauseConnectorResponse; +import com.google.cloud.managedkafka.v1.RestartConnectorRequest; +import com.google.cloud.managedkafka.v1.RestartConnectorResponse; +import com.google.cloud.managedkafka.v1.ResumeConnectorRequest; +import com.google.cloud.managedkafka.v1.ResumeConnectorResponse; +import com.google.cloud.managedkafka.v1.StopConnectorRequest; +import com.google.cloud.managedkafka.v1.StopConnectorResponse; +import com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest; +import com.google.cloud.managedkafka.v1.UpdateConnectorRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import com.google.protobuf.Empty; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the ManagedKafkaConnect service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class ManagedKafkaConnectStub implements BackgroundResource { + + public OperationsStub getOperationsStub() { + return null; + } + + public com.google.api.gax.httpjson.longrunning.stub.OperationsStub getHttpJsonOperationsStub() { + return null; + } + + public UnaryCallable + listConnectClustersPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listConnectClustersPagedCallable()"); + } + + public UnaryCallable + listConnectClustersCallable() { + throw new UnsupportedOperationException("Not implemented: listConnectClustersCallable()"); + } + + public UnaryCallable getConnectClusterCallable() { + throw new UnsupportedOperationException("Not implemented: getConnectClusterCallable()"); + } + + public OperationCallable + createConnectClusterOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: createConnectClusterOperationCallable()"); + } + + public UnaryCallable createConnectClusterCallable() { + throw new UnsupportedOperationException("Not implemented: createConnectClusterCallable()"); + } + + public OperationCallable + updateConnectClusterOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: updateConnectClusterOperationCallable()"); + } + + public UnaryCallable updateConnectClusterCallable() { + throw new UnsupportedOperationException("Not implemented: updateConnectClusterCallable()"); + } + + public OperationCallable + deleteConnectClusterOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: deleteConnectClusterOperationCallable()"); + } + + public UnaryCallable deleteConnectClusterCallable() { + throw new UnsupportedOperationException("Not implemented: deleteConnectClusterCallable()"); + } + + public UnaryCallable + listConnectorsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listConnectorsPagedCallable()"); + } + + public UnaryCallable listConnectorsCallable() { + throw new UnsupportedOperationException("Not implemented: listConnectorsCallable()"); + } + + public UnaryCallable getConnectorCallable() { + throw new UnsupportedOperationException("Not implemented: getConnectorCallable()"); + } + + public UnaryCallable createConnectorCallable() { + throw new UnsupportedOperationException("Not implemented: createConnectorCallable()"); + } + + public UnaryCallable updateConnectorCallable() { + throw new UnsupportedOperationException("Not implemented: updateConnectorCallable()"); + } + + public UnaryCallable deleteConnectorCallable() { + throw new UnsupportedOperationException("Not implemented: deleteConnectorCallable()"); + } + + public UnaryCallable pauseConnectorCallable() { + throw new UnsupportedOperationException("Not implemented: pauseConnectorCallable()"); + } + + public UnaryCallable resumeConnectorCallable() { + throw new UnsupportedOperationException("Not implemented: resumeConnectorCallable()"); + } + + public UnaryCallable + restartConnectorCallable() { + throw new UnsupportedOperationException("Not implemented: restartConnectorCallable()"); + } + + public UnaryCallable stopConnectorCallable() { + throw new UnsupportedOperationException("Not implemented: stopConnectorCallable()"); + } + + public UnaryCallable + listLocationsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listLocationsPagedCallable()"); + } + + public UnaryCallable listLocationsCallable() { + throw new 
UnsupportedOperationException("Not implemented: listLocationsCallable()"); + } + + public UnaryCallable getLocationCallable() { + throw new UnsupportedOperationException("Not implemented: getLocationCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/ManagedKafkaConnectStubSettings.java b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/ManagedKafkaConnectStubSettings.java new file mode 100644 index 000000000000..ad9a8f46a943 --- /dev/null +++ b/java-managedkafka/google-cloud-managedkafka/src/main/java/com/google/cloud/managedkafka/v1/stub/ManagedKafkaConnectStubSettings.java @@ -0,0 +1,1117 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.stub; + +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectClustersPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectorsPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListLocationsPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.HttpJsonTransportChannel; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.location.GetLocationRequest; +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.ListLocationsResponse; +import com.google.cloud.location.Location; +import 
com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.CreateConnectClusterRequest; +import com.google.cloud.managedkafka.v1.CreateConnectorRequest; +import com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest; +import com.google.cloud.managedkafka.v1.DeleteConnectorRequest; +import com.google.cloud.managedkafka.v1.GetConnectClusterRequest; +import com.google.cloud.managedkafka.v1.GetConnectorRequest; +import com.google.cloud.managedkafka.v1.ListConnectClustersRequest; +import com.google.cloud.managedkafka.v1.ListConnectClustersResponse; +import com.google.cloud.managedkafka.v1.ListConnectorsRequest; +import com.google.cloud.managedkafka.v1.ListConnectorsResponse; +import com.google.cloud.managedkafka.v1.OperationMetadata; +import com.google.cloud.managedkafka.v1.PauseConnectorRequest; +import com.google.cloud.managedkafka.v1.PauseConnectorResponse; +import com.google.cloud.managedkafka.v1.RestartConnectorRequest; +import com.google.cloud.managedkafka.v1.RestartConnectorResponse; +import com.google.cloud.managedkafka.v1.ResumeConnectorRequest; +import com.google.cloud.managedkafka.v1.ResumeConnectorResponse; +import com.google.cloud.managedkafka.v1.StopConnectorRequest; +import com.google.cloud.managedkafka.v1.StopConnectorResponse; +import com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest; +import com.google.cloud.managedkafka.v1.UpdateConnectorRequest; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link ManagedKafkaConnectStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (managedkafka.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of getConnectCluster: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * ManagedKafkaConnectStubSettings.Builder managedKafkaConnectSettingsBuilder =
+ *     ManagedKafkaConnectStubSettings.newBuilder();
+ * managedKafkaConnectSettingsBuilder
+ *     .getConnectClusterSettings()
+ *     .setRetrySettings(
+ *         managedKafkaConnectSettingsBuilder
+ *             .getConnectClusterSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * ManagedKafkaConnectStubSettings managedKafkaConnectSettings =
+ *     managedKafkaConnectSettingsBuilder.build();
+ * }</pre>
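The generated snippet above works at the stub-settings layer. Most applications would apply the same override through the client-level settings instead; the following is a minimal, hedged sketch of that path, assuming the companion ManagedKafkaConnectSettings and ManagedKafkaConnectClient classes generated in this same release (their builder methods mirror the stub settings named here), with illustrative retry values:

```java
import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
import com.google.cloud.managedkafka.v1.ManagedKafkaConnectSettings;
import java.time.Duration;

public class GetConnectClusterRetryExample {
  public static void main(String[] args) throws Exception {
    ManagedKafkaConnectSettings.Builder builder = ManagedKafkaConnectSettings.newBuilder();
    // Same override as in the generated sample, applied at the client-settings layer.
    builder
        .getConnectClusterSettings()
        .setRetrySettings(
            builder
                .getConnectClusterSettings()
                .getRetrySettings()
                .toBuilder()
                .setMaxAttempts(5)
                .setTotalTimeoutDuration(Duration.ofSeconds(300))
                .build());
    // The client owns the configured channel; close it when done.
    try (ManagedKafkaConnectClient client =
        ManagedKafkaConnectClient.create(builder.build())) {
      // Calls to getConnectCluster(...) now retry under the settings above.
    }
  }
}
```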
+ * + * Please refer to the [Client Side Retry + * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for + * additional support in setting retries. + * + *

To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for createConnectCluster: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * ManagedKafkaConnectStubSettings.Builder managedKafkaConnectSettingsBuilder =
+ *     ManagedKafkaConnectStubSettings.newBuilder();
+ * TimedRetryAlgorithm timedRetryAlgorithm =
+ *     OperationTimedPollAlgorithm.create(
+ *         RetrySettings.newBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
+ *             .setRetryDelayMultiplier(1.5)
+ *             .setMaxRetryDelayDuration(Duration.ofMillis(5000))
+ *             .setTotalTimeoutDuration(Duration.ofHours(24))
+ *             .build());
+ * managedKafkaConnectSettingsBuilder
+ *     .createConnectClusterOperationSettings()
+ *     .setPollingAlgorithm(timedRetryAlgorithm)
+ *     .build();
+ * }</pre>
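To complement the generated snippet (note the corrected OperationTimedPollAlgorithm class name and createConnectClusterOperationSettings() method, both defined in this file), here is a self-contained sketch of the same polling override applied at the client-settings layer. It assumes the companion ManagedKafkaConnectSettings class from this release; the delay and timeout values are illustrative, not recommendations:

```java
import com.google.api.gax.longrunning.OperationTimedPollAlgorithm;
import com.google.api.gax.retrying.RetrySettings;
import com.google.api.gax.retrying.TimedRetryAlgorithm;
import com.google.cloud.managedkafka.v1.ManagedKafkaConnectSettings;
import java.time.Duration;

public class CreateConnectClusterPollingExample {
  public static void main(String[] args) throws Exception {
    // Polling schedule for the createConnectCluster LRO: first poll after 500 ms,
    // back off 1.5x up to 5 s between polls, and give up after 24 h.
    TimedRetryAlgorithm timedRetryAlgorithm =
        OperationTimedPollAlgorithm.create(
            RetrySettings.newBuilder()
                .setInitialRetryDelayDuration(Duration.ofMillis(500))
                .setRetryDelayMultiplier(1.5)
                .setMaxRetryDelayDuration(Duration.ofMillis(5000))
                .setTotalTimeoutDuration(Duration.ofHours(24))
                .build());

    ManagedKafkaConnectSettings.Builder builder = ManagedKafkaConnectSettings.newBuilder();
    builder.createConnectClusterOperationSettings().setPollingAlgorithm(timedRetryAlgorithm);
    ManagedKafkaConnectSettings settings = builder.build();
    // Pass `settings` to ManagedKafkaConnectClient.create(settings) to use them.
  }
}
```

Polling settings only change how the returned OperationFuture polls the long-running operation; they do not affect server-side execution of the operation itself.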
+ */ +@Generated("by gapic-generator-java") +public class ManagedKafkaConnectStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder().add("https://www.googleapis.com/auth/cloud-platform").build(); + + private final PagedCallSettings< + ListConnectClustersRequest, ListConnectClustersResponse, ListConnectClustersPagedResponse> + listConnectClustersSettings; + private final UnaryCallSettings + getConnectClusterSettings; + private final UnaryCallSettings + createConnectClusterSettings; + private final OperationCallSettings< + CreateConnectClusterRequest, ConnectCluster, OperationMetadata> + createConnectClusterOperationSettings; + private final UnaryCallSettings + updateConnectClusterSettings; + private final OperationCallSettings< + UpdateConnectClusterRequest, ConnectCluster, OperationMetadata> + updateConnectClusterOperationSettings; + private final UnaryCallSettings + deleteConnectClusterSettings; + private final OperationCallSettings + deleteConnectClusterOperationSettings; + private final PagedCallSettings< + ListConnectorsRequest, ListConnectorsResponse, ListConnectorsPagedResponse> + listConnectorsSettings; + private final UnaryCallSettings getConnectorSettings; + private final UnaryCallSettings createConnectorSettings; + private final UnaryCallSettings updateConnectorSettings; + private final UnaryCallSettings deleteConnectorSettings; + private final UnaryCallSettings + pauseConnectorSettings; + private final UnaryCallSettings + resumeConnectorSettings; + private final UnaryCallSettings + restartConnectorSettings; + private final UnaryCallSettings + stopConnectorSettings; + private final PagedCallSettings< + ListLocationsRequest, ListLocationsResponse, ListLocationsPagedResponse> + listLocationsSettings; + private final UnaryCallSettings getLocationSettings; + + private static final PagedListDescriptor< + ListConnectClustersRequest, ListConnectClustersResponse, ConnectCluster> + LIST_CONNECT_CLUSTERS_PAGE_STR_DESC = + new PagedListDescriptor< + ListConnectClustersRequest, ListConnectClustersResponse, ConnectCluster>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListConnectClustersRequest injectToken( + ListConnectClustersRequest payload, String token) { + return ListConnectClustersRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListConnectClustersRequest injectPageSize( + ListConnectClustersRequest payload, int pageSize) { + return ListConnectClustersRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListConnectClustersRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListConnectClustersResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListConnectClustersResponse payload) { + return payload.getConnectClustersList(); + } + }; + + private static final PagedListDescriptor + LIST_CONNECTORS_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListConnectorsRequest injectToken(ListConnectorsRequest payload, String token) { + return ListConnectorsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListConnectorsRequest injectPageSize( + ListConnectorsRequest payload, int pageSize) { + return 
ListConnectorsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListConnectorsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListConnectorsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListConnectorsResponse payload) { + return payload.getConnectorsList(); + } + }; + + private static final PagedListDescriptor + LIST_LOCATIONS_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListLocationsRequest injectToken(ListLocationsRequest payload, String token) { + return ListLocationsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListLocationsRequest injectPageSize(ListLocationsRequest payload, int pageSize) { + return ListLocationsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListLocationsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListLocationsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListLocationsResponse payload) { + return payload.getLocationsList(); + } + }; + + private static final PagedListResponseFactory< + ListConnectClustersRequest, ListConnectClustersResponse, ListConnectClustersPagedResponse> + LIST_CONNECT_CLUSTERS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListConnectClustersRequest, + ListConnectClustersResponse, + ListConnectClustersPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListConnectClustersRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_CONNECT_CLUSTERS_PAGE_STR_DESC, request, context); + return ListConnectClustersPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListConnectorsRequest, ListConnectorsResponse, ListConnectorsPagedResponse> + LIST_CONNECTORS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListConnectorsRequest, ListConnectorsResponse, ListConnectorsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListConnectorsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_CONNECTORS_PAGE_STR_DESC, request, context); + return ListConnectorsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListLocationsRequest, ListLocationsResponse, ListLocationsPagedResponse> + LIST_LOCATIONS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListLocationsRequest, ListLocationsResponse, ListLocationsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListLocationsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_LOCATIONS_PAGE_STR_DESC, request, context); + return ListLocationsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Returns the object with the settings used for calls to listConnectClusters. 
*/ + public PagedCallSettings< + ListConnectClustersRequest, ListConnectClustersResponse, ListConnectClustersPagedResponse> + listConnectClustersSettings() { + return listConnectClustersSettings; + } + + /** Returns the object with the settings used for calls to getConnectCluster. */ + public UnaryCallSettings getConnectClusterSettings() { + return getConnectClusterSettings; + } + + /** Returns the object with the settings used for calls to createConnectCluster. */ + public UnaryCallSettings createConnectClusterSettings() { + return createConnectClusterSettings; + } + + /** Returns the object with the settings used for calls to createConnectCluster. */ + public OperationCallSettings + createConnectClusterOperationSettings() { + return createConnectClusterOperationSettings; + } + + /** Returns the object with the settings used for calls to updateConnectCluster. */ + public UnaryCallSettings updateConnectClusterSettings() { + return updateConnectClusterSettings; + } + + /** Returns the object with the settings used for calls to updateConnectCluster. */ + public OperationCallSettings + updateConnectClusterOperationSettings() { + return updateConnectClusterOperationSettings; + } + + /** Returns the object with the settings used for calls to deleteConnectCluster. */ + public UnaryCallSettings deleteConnectClusterSettings() { + return deleteConnectClusterSettings; + } + + /** Returns the object with the settings used for calls to deleteConnectCluster. */ + public OperationCallSettings + deleteConnectClusterOperationSettings() { + return deleteConnectClusterOperationSettings; + } + + /** Returns the object with the settings used for calls to listConnectors. */ + public PagedCallSettings< + ListConnectorsRequest, ListConnectorsResponse, ListConnectorsPagedResponse> + listConnectorsSettings() { + return listConnectorsSettings; + } + + /** Returns the object with the settings used for calls to getConnector. */ + public UnaryCallSettings getConnectorSettings() { + return getConnectorSettings; + } + + /** Returns the object with the settings used for calls to createConnector. */ + public UnaryCallSettings createConnectorSettings() { + return createConnectorSettings; + } + + /** Returns the object with the settings used for calls to updateConnector. */ + public UnaryCallSettings updateConnectorSettings() { + return updateConnectorSettings; + } + + /** Returns the object with the settings used for calls to deleteConnector. */ + public UnaryCallSettings deleteConnectorSettings() { + return deleteConnectorSettings; + } + + /** Returns the object with the settings used for calls to pauseConnector. */ + public UnaryCallSettings pauseConnectorSettings() { + return pauseConnectorSettings; + } + + /** Returns the object with the settings used for calls to resumeConnector. */ + public UnaryCallSettings + resumeConnectorSettings() { + return resumeConnectorSettings; + } + + /** Returns the object with the settings used for calls to restartConnector. */ + public UnaryCallSettings + restartConnectorSettings() { + return restartConnectorSettings; + } + + /** Returns the object with the settings used for calls to stopConnector. */ + public UnaryCallSettings stopConnectorSettings() { + return stopConnectorSettings; + } + + /** Returns the object with the settings used for calls to listLocations. */ + public PagedCallSettings + listLocationsSettings() { + return listLocationsSettings; + } + + /** Returns the object with the settings used for calls to getLocation. 
*/ + public UnaryCallSettings getLocationSettings() { + return getLocationSettings; + } + + public ManagedKafkaConnectStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcManagedKafkaConnectStub.create(this); + } + if (getTransportChannelProvider() + .getTransportName() + .equals(HttpJsonTransportChannel.getHttpJsonTransportName())) { + return HttpJsonManagedKafkaConnectStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "managedkafka"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "managedkafka.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "managedkafka.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + /** Returns a builder for the default REST ChannelProvider for this service. */ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return InstantiatingHttpJsonChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultGrpcApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(ManagedKafkaConnectStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultHttpJsonApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(ManagedKafkaConnectStubSettings.class)) + .setTransportToken( + GaxHttpJsonProperties.getHttpJsonTokenName(), + GaxHttpJsonProperties.getHttpJsonVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ManagedKafkaConnectStubSettings.defaultGrpcApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. 
*/ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected ManagedKafkaConnectStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + listConnectClustersSettings = settingsBuilder.listConnectClustersSettings().build(); + getConnectClusterSettings = settingsBuilder.getConnectClusterSettings().build(); + createConnectClusterSettings = settingsBuilder.createConnectClusterSettings().build(); + createConnectClusterOperationSettings = + settingsBuilder.createConnectClusterOperationSettings().build(); + updateConnectClusterSettings = settingsBuilder.updateConnectClusterSettings().build(); + updateConnectClusterOperationSettings = + settingsBuilder.updateConnectClusterOperationSettings().build(); + deleteConnectClusterSettings = settingsBuilder.deleteConnectClusterSettings().build(); + deleteConnectClusterOperationSettings = + settingsBuilder.deleteConnectClusterOperationSettings().build(); + listConnectorsSettings = settingsBuilder.listConnectorsSettings().build(); + getConnectorSettings = settingsBuilder.getConnectorSettings().build(); + createConnectorSettings = settingsBuilder.createConnectorSettings().build(); + updateConnectorSettings = settingsBuilder.updateConnectorSettings().build(); + deleteConnectorSettings = settingsBuilder.deleteConnectorSettings().build(); + pauseConnectorSettings = settingsBuilder.pauseConnectorSettings().build(); + resumeConnectorSettings = settingsBuilder.resumeConnectorSettings().build(); + restartConnectorSettings = settingsBuilder.restartConnectorSettings().build(); + stopConnectorSettings = settingsBuilder.stopConnectorSettings().build(); + listLocationsSettings = settingsBuilder.listLocationsSettings().build(); + getLocationSettings = settingsBuilder.getLocationSettings().build(); + } + + /** Builder for ManagedKafkaConnectStubSettings. 
*/ + public static class Builder + extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final PagedCallSettings.Builder< + ListConnectClustersRequest, + ListConnectClustersResponse, + ListConnectClustersPagedResponse> + listConnectClustersSettings; + private final UnaryCallSettings.Builder + getConnectClusterSettings; + private final UnaryCallSettings.Builder + createConnectClusterSettings; + private final OperationCallSettings.Builder< + CreateConnectClusterRequest, ConnectCluster, OperationMetadata> + createConnectClusterOperationSettings; + private final UnaryCallSettings.Builder + updateConnectClusterSettings; + private final OperationCallSettings.Builder< + UpdateConnectClusterRequest, ConnectCluster, OperationMetadata> + updateConnectClusterOperationSettings; + private final UnaryCallSettings.Builder + deleteConnectClusterSettings; + private final OperationCallSettings.Builder< + DeleteConnectClusterRequest, Empty, OperationMetadata> + deleteConnectClusterOperationSettings; + private final PagedCallSettings.Builder< + ListConnectorsRequest, ListConnectorsResponse, ListConnectorsPagedResponse> + listConnectorsSettings; + private final UnaryCallSettings.Builder getConnectorSettings; + private final UnaryCallSettings.Builder + createConnectorSettings; + private final UnaryCallSettings.Builder + updateConnectorSettings; + private final UnaryCallSettings.Builder deleteConnectorSettings; + private final UnaryCallSettings.Builder + pauseConnectorSettings; + private final UnaryCallSettings.Builder + resumeConnectorSettings; + private final UnaryCallSettings.Builder + restartConnectorSettings; + private final UnaryCallSettings.Builder + stopConnectorSettings; + private final PagedCallSettings.Builder< + ListLocationsRequest, ListLocationsResponse, ListLocationsPagedResponse> + listLocationsSettings; + private final UnaryCallSettings.Builder getLocationSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put( + "no_retry_1_codes", ImmutableSet.copyOf(Lists.newArrayList())); + definitions.put("no_retry_codes", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(10000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setTotalTimeoutDuration(Duration.ofMillis(60000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setTotalTimeoutDuration(Duration.ofMillis(60000L)) + .build(); + definitions.put("no_retry_1_params", settings); + settings = RetrySettings.newBuilder().setRpcTimeoutMultiplier(1.0).build(); + definitions.put("no_retry_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { 
+ this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + listConnectClustersSettings = + PagedCallSettings.newBuilder(LIST_CONNECT_CLUSTERS_PAGE_STR_FACT); + getConnectClusterSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createConnectClusterSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createConnectClusterOperationSettings = OperationCallSettings.newBuilder(); + updateConnectClusterSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateConnectClusterOperationSettings = OperationCallSettings.newBuilder(); + deleteConnectClusterSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteConnectClusterOperationSettings = OperationCallSettings.newBuilder(); + listConnectorsSettings = PagedCallSettings.newBuilder(LIST_CONNECTORS_PAGE_STR_FACT); + getConnectorSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createConnectorSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateConnectorSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteConnectorSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + pauseConnectorSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + resumeConnectorSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + restartConnectorSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + stopConnectorSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listLocationsSettings = PagedCallSettings.newBuilder(LIST_LOCATIONS_PAGE_STR_FACT); + getLocationSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + listConnectClustersSettings, + getConnectClusterSettings, + createConnectClusterSettings, + updateConnectClusterSettings, + deleteConnectClusterSettings, + listConnectorsSettings, + getConnectorSettings, + createConnectorSettings, + updateConnectorSettings, + deleteConnectorSettings, + pauseConnectorSettings, + resumeConnectorSettings, + restartConnectorSettings, + stopConnectorSettings, + listLocationsSettings, + getLocationSettings); + initDefaults(this); + } + + protected Builder(ManagedKafkaConnectStubSettings settings) { + super(settings); + + listConnectClustersSettings = settings.listConnectClustersSettings.toBuilder(); + getConnectClusterSettings = settings.getConnectClusterSettings.toBuilder(); + createConnectClusterSettings = settings.createConnectClusterSettings.toBuilder(); + createConnectClusterOperationSettings = + settings.createConnectClusterOperationSettings.toBuilder(); + updateConnectClusterSettings = settings.updateConnectClusterSettings.toBuilder(); + updateConnectClusterOperationSettings = + settings.updateConnectClusterOperationSettings.toBuilder(); + deleteConnectClusterSettings = settings.deleteConnectClusterSettings.toBuilder(); + deleteConnectClusterOperationSettings = + settings.deleteConnectClusterOperationSettings.toBuilder(); + listConnectorsSettings = settings.listConnectorsSettings.toBuilder(); + getConnectorSettings = settings.getConnectorSettings.toBuilder(); + createConnectorSettings = settings.createConnectorSettings.toBuilder(); + updateConnectorSettings = settings.updateConnectorSettings.toBuilder(); + deleteConnectorSettings = settings.deleteConnectorSettings.toBuilder(); + pauseConnectorSettings = settings.pauseConnectorSettings.toBuilder(); + resumeConnectorSettings = settings.resumeConnectorSettings.toBuilder(); + restartConnectorSettings = settings.restartConnectorSettings.toBuilder(); + 
stopConnectorSettings = settings.stopConnectorSettings.toBuilder(); + listLocationsSettings = settings.listLocationsSettings.toBuilder(); + getLocationSettings = settings.getLocationSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + listConnectClustersSettings, + getConnectClusterSettings, + createConnectClusterSettings, + updateConnectClusterSettings, + deleteConnectClusterSettings, + listConnectorsSettings, + getConnectorSettings, + createConnectorSettings, + updateConnectorSettings, + deleteConnectorSettings, + pauseConnectorSettings, + resumeConnectorSettings, + restartConnectorSettings, + stopConnectorSettings, + listLocationsSettings, + getLocationSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder createHttpJsonDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultHttpJsonTransportProviderBuilder().build()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultHttpJsonApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .listConnectClustersSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getConnectClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createConnectClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .updateConnectClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .deleteConnectClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .listConnectorsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getConnectorSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createConnectorSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .updateConnectorSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .deleteConnectorSettings() + 
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .pauseConnectorSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .resumeConnectorSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .restartConnectorSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .stopConnectorSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")); + + builder + .listLocationsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .getLocationSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .createConnectClusterOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(ConnectCluster.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(OperationMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .updateConnectClusterOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(ConnectCluster.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(OperationMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .deleteConnectClusterOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_1_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Empty.class)) + .setMetadataTransformer( + 
ProtoOperationTransformers.MetadataTransformer.create(OperationMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to listConnectClusters. */ + public PagedCallSettings.Builder< + ListConnectClustersRequest, + ListConnectClustersResponse, + ListConnectClustersPagedResponse> + listConnectClustersSettings() { + return listConnectClustersSettings; + } + + /** Returns the builder for the settings used for calls to getConnectCluster. */ + public UnaryCallSettings.Builder + getConnectClusterSettings() { + return getConnectClusterSettings; + } + + /** Returns the builder for the settings used for calls to createConnectCluster. */ + public UnaryCallSettings.Builder + createConnectClusterSettings() { + return createConnectClusterSettings; + } + + /** Returns the builder for the settings used for calls to createConnectCluster. */ + public OperationCallSettings.Builder< + CreateConnectClusterRequest, ConnectCluster, OperationMetadata> + createConnectClusterOperationSettings() { + return createConnectClusterOperationSettings; + } + + /** Returns the builder for the settings used for calls to updateConnectCluster. */ + public UnaryCallSettings.Builder + updateConnectClusterSettings() { + return updateConnectClusterSettings; + } + + /** Returns the builder for the settings used for calls to updateConnectCluster. */ + public OperationCallSettings.Builder< + UpdateConnectClusterRequest, ConnectCluster, OperationMetadata> + updateConnectClusterOperationSettings() { + return updateConnectClusterOperationSettings; + } + + /** Returns the builder for the settings used for calls to deleteConnectCluster. */ + public UnaryCallSettings.Builder + deleteConnectClusterSettings() { + return deleteConnectClusterSettings; + } + + /** Returns the builder for the settings used for calls to deleteConnectCluster. */ + public OperationCallSettings.Builder + deleteConnectClusterOperationSettings() { + return deleteConnectClusterOperationSettings; + } + + /** Returns the builder for the settings used for calls to listConnectors. */ + public PagedCallSettings.Builder< + ListConnectorsRequest, ListConnectorsResponse, ListConnectorsPagedResponse> + listConnectorsSettings() { + return listConnectorsSettings; + } + + /** Returns the builder for the settings used for calls to getConnector. */ + public UnaryCallSettings.Builder getConnectorSettings() { + return getConnectorSettings; + } + + /** Returns the builder for the settings used for calls to createConnector. */ + public UnaryCallSettings.Builder createConnectorSettings() { + return createConnectorSettings; + } + + /** Returns the builder for the settings used for calls to updateConnector. */ + public UnaryCallSettings.Builder updateConnectorSettings() { + return updateConnectorSettings; + } + + /** Returns the builder for the settings used for calls to deleteConnector. */ + public UnaryCallSettings.Builder deleteConnectorSettings() { + return deleteConnectorSettings; + } + + /** Returns the builder for the settings used for calls to pauseConnector. */ + public UnaryCallSettings.Builder + pauseConnectorSettings() { + return pauseConnectorSettings; + } + + /** Returns the builder for the settings used for calls to resumeConnector. 
*/ + public UnaryCallSettings.Builder + resumeConnectorSettings() { + return resumeConnectorSettings; + } + + /** Returns the builder for the settings used for calls to restartConnector. */ + public UnaryCallSettings.Builder + restartConnectorSettings() { + return restartConnectorSettings; + } + + /** Returns the builder for the settings used for calls to stopConnector. */ + public UnaryCallSettings.Builder + stopConnectorSettings() { + return stopConnectorSettings; + } + + /** Returns the builder for the settings used for calls to listLocations. */ + public PagedCallSettings.Builder< + ListLocationsRequest, ListLocationsResponse, ListLocationsPagedResponse> + listLocationsSettings() { + return listLocationsSettings; + } + + /** Returns the builder for the settings used for calls to getLocation. */ + public UnaryCallSettings.Builder getLocationSettings() { + return getLocationSettings; + } + + @Override + public ManagedKafkaConnectStubSettings build() throws IOException { + return new ManagedKafkaConnectStubSettings(this); + } + } +} diff --git a/java-managedkafka/google-cloud-managedkafka/src/main/resources/META-INF/native-image/com.google.cloud.managedkafka.v1/reflect-config.json b/java-managedkafka/google-cloud-managedkafka/src/main/resources/META-INF/native-image/com.google.cloud.managedkafka.v1/reflect-config.json index 870210f570f8..39745e516c12 100644 --- a/java-managedkafka/google-cloud-managedkafka/src/main/resources/META-INF/native-image/com.google.cloud.managedkafka.v1/reflect-config.json +++ b/java-managedkafka/google-cloud-managedkafka/src/main/resources/META-INF/native-image/com.google.cloud.managedkafka.v1/reflect-config.json @@ -593,6 +593,114 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.managedkafka.v1.ConnectAccessConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ConnectAccessConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ConnectCluster", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ConnectCluster$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ConnectCluster$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ConnectGcpConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ConnectGcpConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ConnectNetworkConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ConnectNetworkConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.Connector", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.Connector$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.Connector$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.managedkafka.v1.ConsumerGroup", "queryAllDeclaredConstructors": true, @@ -665,6 +773,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.managedkafka.v1.CreateConnectClusterRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.CreateConnectClusterRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.CreateConnectorRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.CreateConnectorRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.managedkafka.v1.CreateTopicRequest", "queryAllDeclaredConstructors": true, @@ -701,6 +845,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.cloud.managedkafka.v1.DeleteConnectorRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.DeleteConnectorRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.managedkafka.v1.DeleteConsumerGroupRequest", "queryAllDeclaredConstructors": true, @@ -773,6 +953,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.managedkafka.v1.GetConnectClusterRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.GetConnectClusterRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.GetConnectorRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.GetConnectorRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.managedkafka.v1.GetConsumerGroupRequest", "queryAllDeclaredConstructors": true, @@ -845,6 +1061,78 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.managedkafka.v1.ListConnectClustersRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ListConnectClustersRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ListConnectClustersResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ListConnectClustersResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ListConnectorsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ListConnectorsRequest$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ListConnectorsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ListConnectorsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.managedkafka.v1.ListConsumerGroupsRequest", "queryAllDeclaredConstructors": true, @@ -953,6 +1241,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.managedkafka.v1.PauseConnectorRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.PauseConnectorRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.PauseConnectorResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.PauseConnectorResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.managedkafka.v1.RebalanceConfig", "queryAllDeclaredConstructors": true, @@ -980,6 +1304,132 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.managedkafka.v1.RestartConnectorRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.RestartConnectorRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.RestartConnectorResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.RestartConnectorResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ResumeConnectorRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": 
true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ResumeConnectorRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ResumeConnectorResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.ResumeConnectorResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.StopConnectorRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.StopConnectorRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.StopConnectorResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.StopConnectorResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.TaskRetryPolicy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.TaskRetryPolicy$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.managedkafka.v1.Topic", "queryAllDeclaredConstructors": true, @@ -1016,6 +1466,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.UpdateConnectorRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, 
+ "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.cloud.managedkafka.v1.UpdateConnectorRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.managedkafka.v1.UpdateConsumerGroupRequest", "queryAllDeclaredConstructors": true, diff --git a/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectClientHttpJsonTest.java b/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectClientHttpJsonTest.java new file mode 100644 index 000000000000..1f11ad9a8d5a --- /dev/null +++ b/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectClientHttpJsonTest.java @@ -0,0 +1,1502 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1; + +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectClustersPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectorsPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListLocationsPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.testing.MockHttpService; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.testing.FakeStatusCode; +import com.google.cloud.location.GetLocationRequest; +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.ListLocationsResponse; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.stub.HttpJsonManagedKafkaConnectStub; +import com.google.common.collect.Lists; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class ManagedKafkaConnectClientHttpJsonTest { + private static MockHttpService mockService; + private static ManagedKafkaConnectClient client; + + @BeforeClass + public static void startStaticServer() throws 
IOException { + mockService = + new MockHttpService( + HttpJsonManagedKafkaConnectStub.getMethodDescriptors(), + ManagedKafkaConnectSettings.getDefaultEndpoint()); + ManagedKafkaConnectSettings settings = + ManagedKafkaConnectSettings.newHttpJsonBuilder() + .setTransportChannelProvider( + ManagedKafkaConnectSettings.defaultHttpJsonTransportProviderBuilder() + .setHttpTransport(mockService) + .build()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = ManagedKafkaConnectClient.create(settings); + } + + @AfterClass + public static void stopServer() { + client.close(); + } + + @Before + public void setUp() {} + + @After + public void tearDown() throws Exception { + mockService.reset(); + } + + @Test + public void listConnectClustersTest() throws Exception { + ConnectCluster responsesElement = ConnectCluster.newBuilder().build(); + ListConnectClustersResponse expectedResponse = + ListConnectClustersResponse.newBuilder() + .setNextPageToken("") + .addAllConnectClusters(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + + ListConnectClustersPagedResponse pagedListResponse = client.listConnectClusters(parent); + + List<ConnectCluster> resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getConnectClustersList().get(0), resources.get(0)); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listConnectClustersExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + client.listConnectClusters(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void listConnectClustersTest2() throws Exception { + ConnectCluster responsesElement = ConnectCluster.newBuilder().build(); + ListConnectClustersResponse expectedResponse = + ListConnectClustersResponse.newBuilder() + .setNextPageToken("") + .addAllConnectClusters(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-5833/locations/location-5833"; + + ListConnectClustersPagedResponse pagedListResponse = client.listConnectClusters(parent); + + List<ConnectCluster> resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getConnectClustersList().get(0), resources.get(0)); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listConnectClustersExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-5833/locations/location-5833"; + client.listConnectClusters(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getConnectClusterTest() throws Exception { + ConnectCluster expectedResponse = + ConnectCluster.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setKafkaCluster("kafkaCluster-481625100") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .putAllLabels(new HashMap<String, String>()) + .setCapacityConfig(CapacityConfig.newBuilder().build()) + .putAllConfig(new HashMap<String, String>()) + .build(); + mockService.addResponse(expectedResponse); + + ConnectClusterName name = ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + + ConnectCluster actualResponse = client.getConnectCluster(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getConnectClusterExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectClusterName name = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + client.getConnectCluster(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void getConnectClusterTest2() throws Exception { + ConnectCluster expectedResponse = + ConnectCluster.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setKafkaCluster("kafkaCluster-481625100") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .putAllLabels(new HashMap<String, String>()) + .setCapacityConfig(CapacityConfig.newBuilder().build()) + .putAllConfig(new HashMap<String, String>()) + .build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-1113/locations/location-1113/connectClusters/connectCluster-1113"; + + ConnectCluster actualResponse = client.getConnectCluster(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getConnectClusterExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-1113/locations/location-1113/connectClusters/connectCluster-1113"; + client.getConnectCluster(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createConnectClusterTest() throws Exception { + ConnectCluster expectedResponse = + ConnectCluster.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setKafkaCluster("kafkaCluster-481625100") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .putAllLabels(new HashMap<String, String>()) + .setCapacityConfig(CapacityConfig.newBuilder().build()) + .putAllConfig(new HashMap<String, String>()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createConnectClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + ConnectCluster connectCluster = ConnectCluster.newBuilder().build(); + String connectClusterId = "connectClusterId-1562078485"; + + ConnectCluster actualResponse = + client.createConnectClusterAsync(parent, connectCluster, connectClusterId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createConnectClusterExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + ConnectCluster
connectCluster = ConnectCluster.newBuilder().build(); + String connectClusterId = "connectClusterId-1562078485"; + client.createConnectClusterAsync(parent, connectCluster, connectClusterId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void createConnectClusterTest2() throws Exception { + ConnectCluster expectedResponse = + ConnectCluster.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setKafkaCluster("kafkaCluster-481625100") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .putAllLabels(new HashMap<String, String>()) + .setCapacityConfig(CapacityConfig.newBuilder().build()) + .putAllConfig(new HashMap<String, String>()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createConnectClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-5833/locations/location-5833"; + ConnectCluster connectCluster = ConnectCluster.newBuilder().build(); + String connectClusterId = "connectClusterId-1562078485"; + + ConnectCluster actualResponse = + client.createConnectClusterAsync(parent, connectCluster, connectClusterId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createConnectClusterExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-5833/locations/location-5833"; + ConnectCluster connectCluster = ConnectCluster.newBuilder().build(); + String connectClusterId = "connectClusterId-1562078485"; + client.createConnectClusterAsync(parent, connectCluster, connectClusterId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void updateConnectClusterTest() throws Exception { + ConnectCluster expectedResponse = + ConnectCluster.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setKafkaCluster("kafkaCluster-481625100") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .putAllLabels(new HashMap<String, String>()) + .setCapacityConfig(CapacityConfig.newBuilder().build()) + .putAllConfig(new HashMap<String, String>()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateConnectClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + ConnectCluster connectCluster = + ConnectCluster.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setKafkaCluster("kafkaCluster-481625100") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .putAllLabels(new HashMap<String, String>()) + .setCapacityConfig(CapacityConfig.newBuilder().build()) + .putAllConfig(new HashMap<String, String>()) +
.build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + ConnectCluster actualResponse = + client.updateConnectClusterAsync(connectCluster, updateMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateConnectClusterExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectCluster connectCluster = + ConnectCluster.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setKafkaCluster("kafkaCluster-481625100") + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .putAllLabels(new HashMap<String, String>()) + .setCapacityConfig(CapacityConfig.newBuilder().build()) + .putAllConfig(new HashMap<String, String>()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateConnectClusterAsync(connectCluster, updateMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void deleteConnectClusterTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("deleteConnectClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + ConnectClusterName name = ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + + client.deleteConnectClusterAsync(name).get(); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteConnectClusterExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectClusterName name = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + client.deleteConnectClusterAsync(name).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void deleteConnectClusterTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("deleteConnectClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String name = + "projects/project-1113/locations/location-1113/connectClusters/connectCluster-1113"; + + client.deleteConnectClusterAsync(name).get(); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = +
mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteConnectClusterExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-1113/locations/location-1113/connectClusters/connectCluster-1113"; + client.deleteConnectClusterAsync(name).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void listConnectorsTest() throws Exception { + Connector responsesElement = Connector.newBuilder().build(); + ListConnectorsResponse expectedResponse = + ListConnectorsResponse.newBuilder() + .setNextPageToken("") + .addAllConnectors(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + ConnectClusterName parent = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + + ListConnectorsPagedResponse pagedListResponse = client.listConnectors(parent); + + List<Connector> resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getConnectorsList().get(0), resources.get(0)); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listConnectorsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectClusterName parent = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + client.listConnectors(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void listConnectorsTest2() throws Exception { + Connector responsesElement = Connector.newBuilder().build(); + ListConnectorsResponse expectedResponse = + ListConnectorsResponse.newBuilder() + .setNextPageToken("") + .addAllConnectors(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = + "projects/project-4120/locations/location-4120/connectClusters/connectCluster-4120"; + + ListConnectorsPagedResponse pagedListResponse = client.listConnectors(parent); + + List<Connector> resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getConnectorsList().get(0), resources.get(0)); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listConnectorsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = + "projects/project-4120/locations/location-4120/connectClusters/connectCluster-4120"; + client.listConnectors(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getConnectorTest() throws Exception { + Connector expectedResponse = + Connector.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .putAllConfigs(new HashMap<String, String>()) + .build(); + mockService.addResponse(expectedResponse); + + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + + Connector actualResponse = client.getConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getConnectorExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + client.getConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void getConnectorTest2() throws Exception { + Connector expectedResponse = + Connector.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .putAllConfigs(new HashMap<String, String>()) + .build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + + Connector actualResponse = client.getConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getConnectorExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + client.getConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createConnectorTest() throws Exception { + Connector expectedResponse = + Connector.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .putAllConfigs(new HashMap<String, String>()) + .build(); + mockService.addResponse(expectedResponse); + + ConnectClusterName parent = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + Connector connector = Connector.newBuilder().build(); + String connectorId = "connectorId1724784200"; + + Connector actualResponse = client.createConnector(parent, connector, connectorId); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createConnectorExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectClusterName parent = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + Connector connector = Connector.newBuilder().build(); + String connectorId = "connectorId1724784200"; + client.createConnector(parent, connector, connectorId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void createConnectorTest2() throws Exception { + Connector expectedResponse = + Connector.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .putAllConfigs(new HashMap<String, String>()) + .build(); + mockService.addResponse(expectedResponse); + + String parent = + "projects/project-4120/locations/location-4120/connectClusters/connectCluster-4120"; + Connector connector = Connector.newBuilder().build(); + String connectorId = "connectorId1724784200"; + + Connector actualResponse = client.createConnector(parent, connector, connectorId); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createConnectorExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = + "projects/project-4120/locations/location-4120/connectClusters/connectCluster-4120"; + Connector connector = Connector.newBuilder().build(); + String connectorId = "connectorId1724784200"; + client.createConnector(parent, connector, connectorId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void updateConnectorTest() throws Exception { + Connector expectedResponse = + Connector.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .putAllConfigs(new HashMap<String, String>()) + .build(); + mockService.addResponse(expectedResponse); + + Connector connector = + Connector.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .putAllConfigs(new HashMap<String, String>()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + Connector actualResponse = client.updateConnector(connector, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateConnectorExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + Connector connector = + Connector.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .putAllConfigs(new HashMap<String, String>()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateConnector(connector, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void deleteConnectorTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + + client.deleteConnector(name); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteConnectorExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + client.deleteConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteConnectorTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + + client.deleteConnector(name); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteConnectorExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + client.deleteConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void pauseConnectorTest() throws Exception { + PauseConnectorResponse expectedResponse = PauseConnectorResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + + PauseConnectorResponse actualResponse = client.pauseConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void pauseConnectorExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + client.pauseConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void pauseConnectorTest2() throws Exception { + PauseConnectorResponse expectedResponse = PauseConnectorResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + + PauseConnectorResponse actualResponse = client.pauseConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void pauseConnectorExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + client.pauseConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void resumeConnectorTest() throws Exception { + ResumeConnectorResponse expectedResponse = ResumeConnectorResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + + ResumeConnectorResponse actualResponse = client.resumeConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void resumeConnectorExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + client.resumeConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void resumeConnectorTest2() throws Exception { + ResumeConnectorResponse expectedResponse = ResumeConnectorResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + + ResumeConnectorResponse actualResponse = client.resumeConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void resumeConnectorExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + client.resumeConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void restartConnectorTest() throws Exception { + RestartConnectorResponse expectedResponse = RestartConnectorResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + + RestartConnectorResponse actualResponse = client.restartConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void restartConnectorExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + client.restartConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void restartConnectorTest2() throws Exception { + RestartConnectorResponse expectedResponse = RestartConnectorResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + + RestartConnectorResponse actualResponse = client.restartConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void restartConnectorExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + client.restartConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void stopConnectorTest() throws Exception { + StopConnectorResponse expectedResponse = StopConnectorResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + + StopConnectorResponse actualResponse = client.stopConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void stopConnectorExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + client.stopConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void stopConnectorTest2() throws Exception { + StopConnectorResponse expectedResponse = StopConnectorResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + + StopConnectorResponse actualResponse = client.stopConnector(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + ..getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void stopConnectorExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-5161/locations/location-5161/connectClusters/connectCluster-5161/connectors/connector-5161"; + client.stopConnector(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } + + @Test + public void listLocationsTest() throws Exception { + Location responsesElement = Location.newBuilder().build(); + ListLocationsResponse expectedResponse = + ListLocationsResponse.newBuilder() + .setNextPageToken("") + .addAllLocations(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + ListLocationsRequest request = + ListLocationsRequest.newBuilder() + .setName("projects/project-3664") + .setFilter("filter-1274492040") + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + + ListLocationsPagedResponse pagedListResponse = client.listLocations(request); + + List<Location> resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getLocationsList().get(0), resources.get(0)); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listLocationsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ListLocationsRequest request = + ListLocationsRequest.newBuilder() + .setName("projects/project-3664") + .setFilter("filter-1274492040") + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + client.listLocations(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getLocationTest() throws Exception { + Location expectedResponse = + Location.newBuilder() + .setName("name3373707") + .setLocationId("locationId1541836720") + .setDisplayName("displayName1714148973") + .putAllLabels(new HashMap<String, String>()) + .setMetadata(Any.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + GetLocationRequest request = + GetLocationRequest.newBuilder() + .setName("projects/project-9062/locations/location-9062") + .build(); + + Location actualResponse = client.getLocation(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List<String> actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getLocationExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + GetLocationRequest request = + GetLocationRequest.newBuilder() + .setName("projects/project-9062/locations/location-9062") + .build(); + client.getLocation(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception.
+ } + } +} diff --git a/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectClientTest.java b/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectClientTest.java new file mode 100644 index 000000000000..090977748127 --- /dev/null +++ b/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectClientTest.java @@ -0,0 +1,1316 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1; + +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectClustersPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListConnectorsPagedResponse; +import static com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient.ListLocationsPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.cloud.location.GetLocationRequest; +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.ListLocationsResponse; +import com.google.cloud.location.Location; +import com.google.common.collect.Lists; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class ManagedKafkaConnectClientTest { + private static MockLocations mockLocations; + private static MockManagedKafkaConnect mockManagedKafkaConnect; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private ManagedKafkaConnectClient client; + + @BeforeClass + public static void startStaticServer() { + mockManagedKafkaConnect = new MockManagedKafkaConnect(); + mockLocations = new MockLocations(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), + Arrays.<MockGrpcService>asList(mockManagedKafkaConnect, mockLocations)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { +
+    mockServiceHelper.stop();
+  }
+
+  @Before
+  public void setUp() throws IOException {
+    mockServiceHelper.reset();
+    channelProvider = mockServiceHelper.createChannelProvider();
+    ManagedKafkaConnectSettings settings =
+        ManagedKafkaConnectSettings.newBuilder()
+            .setTransportChannelProvider(channelProvider)
+            .setCredentialsProvider(NoCredentialsProvider.create())
+            .build();
+    client = ManagedKafkaConnectClient.create(settings);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    client.close();
+  }
+
+  @Test
+  public void listConnectClustersTest() throws Exception {
+    ConnectCluster responsesElement = ConnectCluster.newBuilder().build();
+    ListConnectClustersResponse expectedResponse =
+        ListConnectClustersResponse.newBuilder()
+            .setNextPageToken("")
+            .addAllConnectClusters(Arrays.asList(responsesElement))
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]");
+
+    ListConnectClustersPagedResponse pagedListResponse = client.listConnectClusters(parent);
+
+    List<ConnectCluster> resources = Lists.newArrayList(pagedListResponse.iterateAll());
+
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals(expectedResponse.getConnectClustersList().get(0), resources.get(0));
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    ListConnectClustersRequest actualRequest = ((ListConnectClustersRequest) actualRequests.get(0));
+
+    Assert.assertEquals(parent.toString(), actualRequest.getParent());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void listConnectClustersExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]");
+      client.listConnectClusters(parent);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
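+      // (GAX translates the mock's INVALID_ARGUMENT status into InvalidArgumentException.)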
+    }
+  }
+
+  @Test
+  public void listConnectClustersTest2() throws Exception {
+    ConnectCluster responsesElement = ConnectCluster.newBuilder().build();
+    ListConnectClustersResponse expectedResponse =
+        ListConnectClustersResponse.newBuilder()
+            .setNextPageToken("")
+            .addAllConnectClusters(Arrays.asList(responsesElement))
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String parent = "parent-995424086";
+
+    ListConnectClustersPagedResponse pagedListResponse = client.listConnectClusters(parent);
+
+    List<ConnectCluster> resources = Lists.newArrayList(pagedListResponse.iterateAll());
+
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals(expectedResponse.getConnectClustersList().get(0), resources.get(0));
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    ListConnectClustersRequest actualRequest = ((ListConnectClustersRequest) actualRequests.get(0));
+
+    Assert.assertEquals(parent, actualRequest.getParent());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void listConnectClustersExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String parent = "parent-995424086";
+      client.listConnectClusters(parent);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void getConnectClusterTest() throws Exception {
+    ConnectCluster expectedResponse =
+        ConnectCluster.newBuilder()
+            .setName(
+                ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+            .setKafkaCluster("kafkaCluster-481625100")
+            .setCreateTime(Timestamp.newBuilder().build())
+            .setUpdateTime(Timestamp.newBuilder().build())
+            .putAllLabels(new HashMap<String, String>())
+            .setCapacityConfig(CapacityConfig.newBuilder().build())
+            .putAllConfig(new HashMap<String, String>())
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    ConnectClusterName name = ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+
+    ConnectCluster actualResponse = client.getConnectCluster(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    GetConnectClusterRequest actualRequest = ((GetConnectClusterRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name.toString(), actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void getConnectClusterExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectClusterName name =
+          ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+      client.getConnectCluster(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void getConnectClusterTest2() throws Exception {
+    ConnectCluster expectedResponse =
+        ConnectCluster.newBuilder()
+            .setName(
+                ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+            .setKafkaCluster("kafkaCluster-481625100")
+            .setCreateTime(Timestamp.newBuilder().build())
+            .setUpdateTime(Timestamp.newBuilder().build())
+            .putAllLabels(new HashMap<String, String>())
+            .setCapacityConfig(CapacityConfig.newBuilder().build())
+            .putAllConfig(new HashMap<String, String>())
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String name = "name3373707";
+
+    ConnectCluster actualResponse = client.getConnectCluster(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    GetConnectClusterRequest actualRequest = ((GetConnectClusterRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name, actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void getConnectClusterExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String name = "name3373707";
+      client.getConnectCluster(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void createConnectClusterTest() throws Exception {
+    ConnectCluster expectedResponse =
+        ConnectCluster.newBuilder()
+            .setName(
+                ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+            .setKafkaCluster("kafkaCluster-481625100")
+            .setCreateTime(Timestamp.newBuilder().build())
+            .setUpdateTime(Timestamp.newBuilder().build())
+            .putAllLabels(new HashMap<String, String>())
+            .setCapacityConfig(CapacityConfig.newBuilder().build())
+            .putAllConfig(new HashMap<String, String>())
+            .build();
+    Operation resultOperation =
+        Operation.newBuilder()
+            .setName("createConnectClusterTest")
+            .setDone(true)
+            .setResponse(Any.pack(expectedResponse))
+            .build();
+    mockManagedKafkaConnect.addResponse(resultOperation);
+
+    LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]");
+    ConnectCluster connectCluster = ConnectCluster.newBuilder().build();
+    String connectClusterId = "connectClusterId-1562078485";
+
+    ConnectCluster actualResponse =
+        client.createConnectClusterAsync(parent, connectCluster, connectClusterId).get();
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    CreateConnectClusterRequest actualRequest =
+        ((CreateConnectClusterRequest) actualRequests.get(0));
+
+    Assert.assertEquals(parent.toString(), actualRequest.getParent());
+    Assert.assertEquals(connectCluster, actualRequest.getConnectCluster());
+    Assert.assertEquals(connectClusterId, actualRequest.getConnectClusterId());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void createConnectClusterExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]");
+      ConnectCluster connectCluster = ConnectCluster.newBuilder().build();
+      String connectClusterId = "connectClusterId-1562078485";
+      client.createConnectClusterAsync(parent, connectCluster, connectClusterId).get();
+      Assert.fail("No exception raised");
+    } catch (ExecutionException e) {
+      Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass());
+      InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause());
+      Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
+    }
+  }
+
+  @Test
+  public void createConnectClusterTest2() throws Exception {
+    ConnectCluster expectedResponse =
+        ConnectCluster.newBuilder()
+            .setName(
+                ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+            .setKafkaCluster("kafkaCluster-481625100")
+            .setCreateTime(Timestamp.newBuilder().build())
+            .setUpdateTime(Timestamp.newBuilder().build())
+            .putAllLabels(new HashMap<String, String>())
+            .setCapacityConfig(CapacityConfig.newBuilder().build())
+            .putAllConfig(new HashMap<String, String>())
+            .build();
+    Operation resultOperation =
+        Operation.newBuilder()
+            .setName("createConnectClusterTest")
+            .setDone(true)
+            .setResponse(Any.pack(expectedResponse))
+            .build();
+    mockManagedKafkaConnect.addResponse(resultOperation);
+
+    String parent = "parent-995424086";
+    ConnectCluster connectCluster = ConnectCluster.newBuilder().build();
+    String connectClusterId = "connectClusterId-1562078485";
+
+    ConnectCluster actualResponse =
+        client.createConnectClusterAsync(parent, connectCluster, connectClusterId).get();
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    CreateConnectClusterRequest actualRequest =
+        ((CreateConnectClusterRequest) actualRequests.get(0));
+
+    Assert.assertEquals(parent, actualRequest.getParent());
+    Assert.assertEquals(connectCluster, actualRequest.getConnectCluster());
+    Assert.assertEquals(connectClusterId, actualRequest.getConnectClusterId());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void createConnectClusterExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String parent = "parent-995424086";
+      ConnectCluster connectCluster = ConnectCluster.newBuilder().build();
+      String connectClusterId = "connectClusterId-1562078485";
+      client.createConnectClusterAsync(parent, connectCluster, connectClusterId).get();
+      Assert.fail("No exception raised");
+    } catch (ExecutionException e) {
+      Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass());
+      InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause());
+      Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
+    }
+  }
+
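+  // For the long-running methods in this suite the mock replies with a pre-completed
+  // Operation whose response field packs the expected proto, so each OperationFuture
+  // returned by the *Async methods resolves immediately.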
+  @Test
+  public void updateConnectClusterTest() throws Exception {
+    ConnectCluster expectedResponse =
+        ConnectCluster.newBuilder()
+            .setName(
+                ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString())
+            .setKafkaCluster("kafkaCluster-481625100")
+            .setCreateTime(Timestamp.newBuilder().build())
+            .setUpdateTime(Timestamp.newBuilder().build())
+            .putAllLabels(new HashMap<String, String>())
+            .setCapacityConfig(CapacityConfig.newBuilder().build())
+            .putAllConfig(new HashMap<String, String>())
+            .build();
+    Operation resultOperation =
+        Operation.newBuilder()
+            .setName("updateConnectClusterTest")
+            .setDone(true)
+            .setResponse(Any.pack(expectedResponse))
+            .build();
+    mockManagedKafkaConnect.addResponse(resultOperation);
+
+    ConnectCluster connectCluster = ConnectCluster.newBuilder().build();
+    FieldMask updateMask = FieldMask.newBuilder().build();
+
+    ConnectCluster actualResponse =
+        client.updateConnectClusterAsync(connectCluster, updateMask).get();
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    UpdateConnectClusterRequest actualRequest =
+        ((UpdateConnectClusterRequest) actualRequests.get(0));
+
+    Assert.assertEquals(connectCluster, actualRequest.getConnectCluster());
+    Assert.assertEquals(updateMask, actualRequest.getUpdateMask());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void updateConnectClusterExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectCluster connectCluster = ConnectCluster.newBuilder().build();
+      FieldMask updateMask = FieldMask.newBuilder().build();
+      client.updateConnectClusterAsync(connectCluster, updateMask).get();
+      Assert.fail("No exception raised");
+    } catch (ExecutionException e) {
+      Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass());
+      InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause());
+      Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
+    }
+  }
+
+  @Test
+  public void deleteConnectClusterTest() throws Exception {
+    Empty expectedResponse = Empty.newBuilder().build();
+    Operation resultOperation =
+        Operation.newBuilder()
+            .setName("deleteConnectClusterTest")
+            .setDone(true)
+            .setResponse(Any.pack(expectedResponse))
+            .build();
+    mockManagedKafkaConnect.addResponse(resultOperation);
+
+    ConnectClusterName name = ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+
+    client.deleteConnectClusterAsync(name).get();
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    DeleteConnectClusterRequest actualRequest =
+        ((DeleteConnectClusterRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name.toString(), actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void deleteConnectClusterExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectClusterName name =
+          ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+      client.deleteConnectClusterAsync(name).get();
+      Assert.fail("No exception raised");
+    } catch (ExecutionException e) {
+      Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass());
+      InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause());
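+      // The original gRPC status code survives the ExecutionException unwrapping.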
+      Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
+    }
+  }
+
+  @Test
+  public void deleteConnectClusterTest2() throws Exception {
+    Empty expectedResponse = Empty.newBuilder().build();
+    Operation resultOperation =
+        Operation.newBuilder()
+            .setName("deleteConnectClusterTest")
+            .setDone(true)
+            .setResponse(Any.pack(expectedResponse))
+            .build();
+    mockManagedKafkaConnect.addResponse(resultOperation);
+
+    String name = "name3373707";
+
+    client.deleteConnectClusterAsync(name).get();
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    DeleteConnectClusterRequest actualRequest =
+        ((DeleteConnectClusterRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name, actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void deleteConnectClusterExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String name = "name3373707";
+      client.deleteConnectClusterAsync(name).get();
+      Assert.fail("No exception raised");
+    } catch (ExecutionException e) {
+      Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass());
+      InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause());
+      Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
+    }
+  }
+
+  @Test
+  public void listConnectorsTest() throws Exception {
+    Connector responsesElement = Connector.newBuilder().build();
+    ListConnectorsResponse expectedResponse =
+        ListConnectorsResponse.newBuilder()
+            .setNextPageToken("")
+            .addAllConnectors(Arrays.asList(responsesElement))
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    ConnectClusterName parent =
+        ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+
+    ListConnectorsPagedResponse pagedListResponse = client.listConnectors(parent);
+
+    List<Connector> resources = Lists.newArrayList(pagedListResponse.iterateAll());
+
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals(expectedResponse.getConnectorsList().get(0), resources.get(0));
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    ListConnectorsRequest actualRequest = ((ListConnectorsRequest) actualRequests.get(0));
+
+    Assert.assertEquals(parent.toString(), actualRequest.getParent());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void listConnectorsExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectClusterName parent =
+          ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+      client.listConnectors(parent);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void listConnectorsTest2() throws Exception {
+    Connector responsesElement = Connector.newBuilder().build();
+    ListConnectorsResponse expectedResponse =
+        ListConnectorsResponse.newBuilder()
+            .setNextPageToken("")
+            .addAllConnectors(Arrays.asList(responsesElement))
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String parent = "parent-995424086";
+
+    ListConnectorsPagedResponse pagedListResponse = client.listConnectors(parent);
+
+    List<Connector> resources = Lists.newArrayList(pagedListResponse.iterateAll());
+
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals(expectedResponse.getConnectorsList().get(0), resources.get(0));
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    ListConnectorsRequest actualRequest = ((ListConnectorsRequest) actualRequests.get(0));
+
+    Assert.assertEquals(parent, actualRequest.getParent());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void listConnectorsExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String parent = "parent-995424086";
+      client.listConnectors(parent);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void getConnectorTest() throws Exception {
+    Connector expectedResponse =
+        Connector.newBuilder()
+            .setName(
+                ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+                    .toString())
+            .putAllConfigs(new HashMap<String, String>())
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    ConnectorName name =
+        ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+
+    Connector actualResponse = client.getConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    GetConnectorRequest actualRequest = ((GetConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name.toString(), actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void getConnectorExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectorName name =
+          ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+      client.getConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void getConnectorTest2() throws Exception {
+    Connector expectedResponse =
+        Connector.newBuilder()
+            .setName(
+                ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+                    .toString())
+            .putAllConfigs(new HashMap<String, String>())
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String name = "name3373707";
+
+    Connector actualResponse = client.getConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    GetConnectorRequest actualRequest = ((GetConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name, actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void getConnectorExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String name = "name3373707";
+      client.getConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void createConnectorTest() throws Exception {
+    Connector expectedResponse =
+        Connector.newBuilder()
+            .setName(
+                ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+                    .toString())
+            .putAllConfigs(new HashMap<String, String>())
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    ConnectClusterName parent =
+        ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+    Connector connector = Connector.newBuilder().build();
+    String connectorId = "connectorId1724784200";
+
+    Connector actualResponse = client.createConnector(parent, connector, connectorId);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    CreateConnectorRequest actualRequest = ((CreateConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(parent.toString(), actualRequest.getParent());
+    Assert.assertEquals(connector, actualRequest.getConnector());
+    Assert.assertEquals(connectorId, actualRequest.getConnectorId());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void createConnectorExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectClusterName parent =
+          ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]");
+      Connector connector = Connector.newBuilder().build();
+      String connectorId = "connectorId1724784200";
+      client.createConnector(parent, connector, connectorId);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void createConnectorTest2() throws Exception {
+    Connector expectedResponse =
+        Connector.newBuilder()
+            .setName(
+                ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+                    .toString())
+            .putAllConfigs(new HashMap<String, String>())
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String parent = "parent-995424086";
+    Connector connector = Connector.newBuilder().build();
+    String connectorId = "connectorId1724784200";
+
+    Connector actualResponse = client.createConnector(parent, connector, connectorId);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    CreateConnectorRequest actualRequest = ((CreateConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(parent, actualRequest.getParent());
+    Assert.assertEquals(connector, actualRequest.getConnector());
+    Assert.assertEquals(connectorId, actualRequest.getConnectorId());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void createConnectorExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String parent = "parent-995424086";
+      Connector connector = Connector.newBuilder().build();
+      String connectorId = "connectorId1724784200";
+      client.createConnector(parent, connector, connectorId);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void updateConnectorTest() throws Exception {
+    Connector expectedResponse =
+        Connector.newBuilder()
+            .setName(
+                ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]")
+                    .toString())
+            .putAllConfigs(new HashMap<String, String>())
+            .build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    Connector connector = Connector.newBuilder().build();
+    FieldMask updateMask = FieldMask.newBuilder().build();
+
+    Connector actualResponse = client.updateConnector(connector, updateMask);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    UpdateConnectorRequest actualRequest = ((UpdateConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(connector, actualRequest.getConnector());
+    Assert.assertEquals(updateMask, actualRequest.getUpdateMask());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void updateConnectorExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      Connector connector = Connector.newBuilder().build();
+      FieldMask updateMask = FieldMask.newBuilder().build();
+      client.updateConnector(connector, updateMask);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void deleteConnectorTest() throws Exception {
+    Empty expectedResponse = Empty.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    ConnectorName name =
+        ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+
+    client.deleteConnector(name);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    DeleteConnectorRequest actualRequest = ((DeleteConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name.toString(), actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void deleteConnectorExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectorName name =
+          ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+      client.deleteConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void deleteConnectorTest2() throws Exception {
+    Empty expectedResponse = Empty.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String name = "name3373707";
+
+    client.deleteConnector(name);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    DeleteConnectorRequest actualRequest = ((DeleteConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name, actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void deleteConnectorExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String name = "name3373707";
+      client.deleteConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void pauseConnectorTest() throws Exception {
+    PauseConnectorResponse expectedResponse = PauseConnectorResponse.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    ConnectorName name =
+        ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+
+    PauseConnectorResponse actualResponse = client.pauseConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    PauseConnectorRequest actualRequest = ((PauseConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name.toString(), actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void pauseConnectorExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectorName name =
+          ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+      client.pauseConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void pauseConnectorTest2() throws Exception {
+    PauseConnectorResponse expectedResponse = PauseConnectorResponse.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String name = "name3373707";
+
+    PauseConnectorResponse actualResponse = client.pauseConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    PauseConnectorRequest actualRequest = ((PauseConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name, actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void pauseConnectorExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String name = "name3373707";
+      client.pauseConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void resumeConnectorTest() throws Exception {
+    ResumeConnectorResponse expectedResponse = ResumeConnectorResponse.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    ConnectorName name =
+        ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+
+    ResumeConnectorResponse actualResponse = client.resumeConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    ResumeConnectorRequest actualRequest = ((ResumeConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name.toString(), actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void resumeConnectorExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectorName name =
+          ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+      client.resumeConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void resumeConnectorTest2() throws Exception {
+    ResumeConnectorResponse expectedResponse = ResumeConnectorResponse.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String name = "name3373707";
+
+    ResumeConnectorResponse actualResponse = client.resumeConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    ResumeConnectorRequest actualRequest = ((ResumeConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name, actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void resumeConnectorExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String name = "name3373707";
+      client.resumeConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void restartConnectorTest() throws Exception {
+    RestartConnectorResponse expectedResponse = RestartConnectorResponse.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    ConnectorName name =
+        ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+
+    RestartConnectorResponse actualResponse = client.restartConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    RestartConnectorRequest actualRequest = ((RestartConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name.toString(), actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void restartConnectorExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectorName name =
+          ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+      client.restartConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void restartConnectorTest2() throws Exception {
+    RestartConnectorResponse expectedResponse = RestartConnectorResponse.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String name = "name3373707";
+
+    RestartConnectorResponse actualResponse = client.restartConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    RestartConnectorRequest actualRequest = ((RestartConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name, actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void restartConnectorExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String name = "name3373707";
+      client.restartConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void stopConnectorTest() throws Exception {
+    StopConnectorResponse expectedResponse = StopConnectorResponse.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    ConnectorName name =
+        ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+
+    StopConnectorResponse actualResponse = client.stopConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    StopConnectorRequest actualRequest = ((StopConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name.toString(), actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void stopConnectorExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      ConnectorName name =
+          ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]");
+      client.stopConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void stopConnectorTest2() throws Exception {
+    StopConnectorResponse expectedResponse = StopConnectorResponse.newBuilder().build();
+    mockManagedKafkaConnect.addResponse(expectedResponse);
+
+    String name = "name3373707";
+
+    StopConnectorResponse actualResponse = client.stopConnector(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockManagedKafkaConnect.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    StopConnectorRequest actualRequest = ((StopConnectorRequest) actualRequests.get(0));
+
+    Assert.assertEquals(name, actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void stopConnectorExceptionTest2() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockManagedKafkaConnect.addException(exception);
+
+    try {
+      String name = "name3373707";
+      client.stopConnector(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void listLocationsTest() throws Exception {
+    Location responsesElement = Location.newBuilder().build();
+    ListLocationsResponse expectedResponse =
+        ListLocationsResponse.newBuilder()
+            .setNextPageToken("")
+            .addAllLocations(Arrays.asList(responsesElement))
+            .build();
+    mockLocations.addResponse(expectedResponse);
+
+    ListLocationsRequest request =
+        ListLocationsRequest.newBuilder()
+            .setName("name3373707")
+            .setFilter("filter-1274492040")
+            .setPageSize(883849137)
+            .setPageToken("pageToken873572522")
+            .build();
+
+    ListLocationsPagedResponse pagedListResponse = client.listLocations(request);
+
+    List<Location> resources = Lists.newArrayList(pagedListResponse.iterateAll());
+
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals(expectedResponse.getLocationsList().get(0), resources.get(0));
+
+    List<AbstractMessage> actualRequests = mockLocations.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    ListLocationsRequest actualRequest = ((ListLocationsRequest) actualRequests.get(0));
+
+    Assert.assertEquals(request.getName(), actualRequest.getName());
+    Assert.assertEquals(request.getFilter(), actualRequest.getFilter());
+    Assert.assertEquals(request.getPageSize(), actualRequest.getPageSize());
+    Assert.assertEquals(request.getPageToken(), actualRequest.getPageToken());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void listLocationsExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockLocations.addException(exception);
+
+    try {
+      ListLocationsRequest request =
+          ListLocationsRequest.newBuilder()
+              .setName("name3373707")
+              .setFilter("filter-1274492040")
+              .setPageSize(883849137)
+              .setPageToken("pageToken873572522")
+              .build();
+      client.listLocations(request);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+
+  @Test
+  public void getLocationTest() throws Exception {
+    Location expectedResponse =
+        Location.newBuilder()
+            .setName("name3373707")
+            .setLocationId("locationId1541836720")
+            .setDisplayName("displayName1714148973")
+            .putAllLabels(new HashMap<String, String>())
+            .setMetadata(Any.newBuilder().build())
+            .build();
+    mockLocations.addResponse(expectedResponse);
+
+    GetLocationRequest request = GetLocationRequest.newBuilder().setName("name3373707").build();
+
+    Location actualResponse = client.getLocation(request);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<AbstractMessage> actualRequests = mockLocations.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    GetLocationRequest actualRequest = ((GetLocationRequest) actualRequests.get(0));
+
+    Assert.assertEquals(request.getName(), actualRequest.getName());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  public void getLocationExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
+    mockLocations.addException(exception);
+
+    try {
+      GetLocationRequest request = GetLocationRequest.newBuilder().setName("name3373707").build();
+      client.getLocation(request);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception.
+    }
+  }
+}
diff --git a/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/MockManagedKafkaConnect.java b/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/MockManagedKafkaConnect.java
new file mode 100644
index 000000000000..d000d79c3156
--- /dev/null
+++ b/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/MockManagedKafkaConnect.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.managedkafka.v1;
+
+import com.google.api.core.BetaApi;
+import com.google.api.gax.grpc.testing.MockGrpcService;
+import com.google.protobuf.AbstractMessage;
+import io.grpc.ServerServiceDefinition;
+import java.util.List;
+import javax.annotation.Generated;
+
+@BetaApi
+@Generated("by gapic-generator-java")
+public class MockManagedKafkaConnect implements MockGrpcService {
+  private final MockManagedKafkaConnectImpl serviceImpl;
+
+  public MockManagedKafkaConnect() {
+    serviceImpl = new MockManagedKafkaConnectImpl();
+  }
+
+  @Override
+  public List<AbstractMessage> getRequests() {
+    return serviceImpl.getRequests();
+  }
+
+  @Override
+  public void addResponse(AbstractMessage response) {
+    serviceImpl.addResponse(response);
+  }
+
+  @Override
+  public void addException(Exception exception) {
+    serviceImpl.addException(exception);
+  }
+
+  @Override
+  public ServerServiceDefinition getServiceDefinition() {
+    return serviceImpl.bindService();
+  }
+
+  @Override
+  public void reset() {
+    serviceImpl.reset();
+  }
+}
diff --git a/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/MockManagedKafkaConnectImpl.java b/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/MockManagedKafkaConnectImpl.java
new file mode 100644
index 000000000000..6df777c96956
--- /dev/null
+++ b/java-managedkafka/google-cloud-managedkafka/src/test/java/com/google/cloud/managedkafka/v1/MockManagedKafkaConnectImpl.java
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.managedkafka.v1;
+
+import com.google.api.core.BetaApi;
+import com.google.cloud.managedkafka.v1.ManagedKafkaConnectGrpc.ManagedKafkaConnectImplBase;
+import com.google.longrunning.Operation;
+import com.google.protobuf.AbstractMessage;
+import com.google.protobuf.Empty;
+import io.grpc.stub.StreamObserver;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import javax.annotation.Generated;
+
+@BetaApi
+@Generated("by gapic-generator-java")
+public class MockManagedKafkaConnectImpl extends ManagedKafkaConnectImplBase {
+  private List<AbstractMessage> requests;
+  private Queue<Object> responses;
+
+  public MockManagedKafkaConnectImpl() {
+    requests = new ArrayList<>();
+    responses = new LinkedList<>();
+  }
+
+  public List<AbstractMessage> getRequests() {
+    return requests;
+  }
+
+  public void addResponse(AbstractMessage response) {
+    responses.add(response);
+  }
+
+  public void setResponses(List<AbstractMessage> responses) {
+    this.responses = new LinkedList<Object>(responses);
+  }
+
+  public void addException(Exception exception) {
+    responses.add(exception);
+  }
+
+  public void reset() {
+    requests = new ArrayList<>();
+    responses = new LinkedList<>();
+  }
+
+  @Override
+  public void listConnectClusters(
+      ListConnectClustersRequest request,
+      StreamObserver<ListConnectClustersResponse> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof ListConnectClustersResponse) {
+      requests.add(request);
+      responseObserver.onNext(((ListConnectClustersResponse) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method ListConnectClusters, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  ListConnectClustersResponse.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void getConnectCluster(
+      GetConnectClusterRequest request, StreamObserver<ConnectCluster> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof ConnectCluster) {
+      requests.add(request);
+      responseObserver.onNext(((ConnectCluster) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method GetConnectCluster, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  ConnectCluster.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void createConnectCluster(
+      CreateConnectClusterRequest request, StreamObserver<Operation> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof Operation) {
+      requests.add(request);
+      responseObserver.onNext(((Operation) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method CreateConnectCluster, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  Operation.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void updateConnectCluster(
+      UpdateConnectClusterRequest request, StreamObserver<Operation> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof Operation) {
+      requests.add(request);
+      responseObserver.onNext(((Operation) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method UpdateConnectCluster, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  Operation.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void deleteConnectCluster(
+      DeleteConnectClusterRequest request, StreamObserver<Operation> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof Operation) {
+      requests.add(request);
+      responseObserver.onNext(((Operation) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method DeleteConnectCluster, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  Operation.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void listConnectors(
+      ListConnectorsRequest request, StreamObserver<ListConnectorsResponse> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof ListConnectorsResponse) {
+      requests.add(request);
+      responseObserver.onNext(((ListConnectorsResponse) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method ListConnectors, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  ListConnectorsResponse.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void getConnector(
+      GetConnectorRequest request, StreamObserver<Connector> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof Connector) {
+      requests.add(request);
+      responseObserver.onNext(((Connector) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method GetConnector, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  Connector.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void createConnector(
+      CreateConnectorRequest request, StreamObserver<Connector> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof Connector) {
+      requests.add(request);
+      responseObserver.onNext(((Connector) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method CreateConnector, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  Connector.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void updateConnector(
+      UpdateConnectorRequest request, StreamObserver<Connector> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof Connector) {
+      requests.add(request);
+      responseObserver.onNext(((Connector) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method UpdateConnector, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  Connector.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void deleteConnector(
+      DeleteConnectorRequest request, StreamObserver<Empty> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof Empty) {
+      requests.add(request);
+      responseObserver.onNext(((Empty) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method DeleteConnector, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  Empty.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void pauseConnector(
+      PauseConnectorRequest request, StreamObserver<PauseConnectorResponse> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof PauseConnectorResponse) {
+      requests.add(request);
+      responseObserver.onNext(((PauseConnectorResponse) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method PauseConnector, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  PauseConnectorResponse.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void resumeConnector(
+      ResumeConnectorRequest request, StreamObserver<ResumeConnectorResponse> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof ResumeConnectorResponse) {
+      requests.add(request);
+      responseObserver.onNext(((ResumeConnectorResponse) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method ResumeConnector, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  ResumeConnectorResponse.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void restartConnector(
+      RestartConnectorRequest request, StreamObserver<RestartConnectorResponse> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof RestartConnectorResponse) {
+      requests.add(request);
+      responseObserver.onNext(((RestartConnectorResponse) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method RestartConnector, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  RestartConnectorResponse.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+
+  @Override
+  public void stopConnector(
+      StopConnectorRequest request, StreamObserver<StopConnectorResponse> responseObserver) {
+    Object response = responses.poll();
+    if (response instanceof StopConnectorResponse) {
+      requests.add(request);
+      responseObserver.onNext(((StopConnectorResponse) response));
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError(((Exception) response));
+    } else {
+      responseObserver.onError(
+          new IllegalArgumentException(
+              String.format(
+                  "Unrecognized response type %s for method StopConnector, expected %s or %s",
+                  response == null ? "null" : response.getClass().getName(),
+                  StopConnectorResponse.class.getName(),
+                  Exception.class.getName())));
+    }
+  }
+}
diff --git a/java-managedkafka/grpc-google-cloud-managedkafka-v1/pom.xml b/java-managedkafka/grpc-google-cloud-managedkafka-v1/pom.xml
index 39b409f32afd..afe0b6a2f575 100644
--- a/java-managedkafka/grpc-google-cloud-managedkafka-v1/pom.xml
+++ b/java-managedkafka/grpc-google-cloud-managedkafka-v1/pom.xml
@@ -4,13 +4,13 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.google.api.grpc</groupId>
   <artifactId>grpc-google-cloud-managedkafka-v1</artifactId>
-  <version>0.16.0</version>
+  <version>0.16.1</version>
   <name>grpc-google-cloud-managedkafka-v1</name>
   <description>GRPC library for google-cloud-managedkafka</description>
   <parent>
     <groupId>com.google.cloud</groupId>
     <artifactId>google-cloud-managedkafka-parent</artifactId>
-    <version>0.16.0</version>
+    <version>0.16.1</version>
   </parent>
diff --git a/java-managedkafka/grpc-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectGrpc.java b/java-managedkafka/grpc-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectGrpc.java
new file mode 100644
index 000000000000..04ed36f26b09
--- /dev/null
+++ b/java-managedkafka/grpc-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectGrpc.java
@@ -0,0 +1,1940 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.managedkafka.v1;
+
+import static io.grpc.MethodDescriptor.generateFullMethodName;
+
+/**
+ *
+ *
+ * <pre>
+ * The service that a client application uses to manage Apache Kafka Connect
+ * clusters and connectors.
+ * </pre>
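+ *
+ * <p>A minimal usage sketch, not part of the generated service definition; the endpoint, channel
+ * setup, and credentials handling below are illustrative assumptions:
+ *
+ * <pre>{@code
+ * // Channel creation is shown bare for brevity; production code also configures credentials.
+ * ManagedChannel channel =
+ *     ManagedChannelBuilder.forTarget("managedkafka.googleapis.com:443").build();
+ * ManagedKafkaConnectGrpc.ManagedKafkaConnectBlockingStub stub =
+ *     ManagedKafkaConnectGrpc.newBlockingStub(channel);
+ * }</pre>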
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler", + comments = "Source: google/cloud/managedkafka/v1/managed_kafka_connect.proto") +@io.grpc.stub.annotations.GrpcGenerated +public final class ManagedKafkaConnectGrpc { + + private ManagedKafkaConnectGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.cloud.managedkafka.v1.ManagedKafkaConnect"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.ListConnectClustersRequest, + com.google.cloud.managedkafka.v1.ListConnectClustersResponse> + getListConnectClustersMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListConnectClusters", + requestType = com.google.cloud.managedkafka.v1.ListConnectClustersRequest.class, + responseType = com.google.cloud.managedkafka.v1.ListConnectClustersResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.ListConnectClustersRequest, + com.google.cloud.managedkafka.v1.ListConnectClustersResponse> + getListConnectClustersMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.ListConnectClustersRequest, + com.google.cloud.managedkafka.v1.ListConnectClustersResponse> + getListConnectClustersMethod; + if ((getListConnectClustersMethod = ManagedKafkaConnectGrpc.getListConnectClustersMethod) + == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getListConnectClustersMethod = ManagedKafkaConnectGrpc.getListConnectClustersMethod) + == null) { + ManagedKafkaConnectGrpc.getListConnectClustersMethod = + getListConnectClustersMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListConnectClusters")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.ListConnectClustersRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.ListConnectClustersResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("ListConnectClusters")) + .build(); + } + } + } + return getListConnectClustersMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.GetConnectClusterRequest, + com.google.cloud.managedkafka.v1.ConnectCluster> + getGetConnectClusterMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetConnectCluster", + requestType = com.google.cloud.managedkafka.v1.GetConnectClusterRequest.class, + responseType = com.google.cloud.managedkafka.v1.ConnectCluster.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.GetConnectClusterRequest, + com.google.cloud.managedkafka.v1.ConnectCluster> + getGetConnectClusterMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.GetConnectClusterRequest, + com.google.cloud.managedkafka.v1.ConnectCluster> + getGetConnectClusterMethod; + if ((getGetConnectClusterMethod = ManagedKafkaConnectGrpc.getGetConnectClusterMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getGetConnectClusterMethod = ManagedKafkaConnectGrpc.getGetConnectClusterMethod) + == null) { + 
ManagedKafkaConnectGrpc.getGetConnectClusterMethod = + getGetConnectClusterMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetConnectCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.GetConnectClusterRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("GetConnectCluster")) + .build(); + } + } + } + return getGetConnectClusterMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.CreateConnectClusterRequest, + com.google.longrunning.Operation> + getCreateConnectClusterMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateConnectCluster", + requestType = com.google.cloud.managedkafka.v1.CreateConnectClusterRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.CreateConnectClusterRequest, + com.google.longrunning.Operation> + getCreateConnectClusterMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.CreateConnectClusterRequest, + com.google.longrunning.Operation> + getCreateConnectClusterMethod; + if ((getCreateConnectClusterMethod = ManagedKafkaConnectGrpc.getCreateConnectClusterMethod) + == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getCreateConnectClusterMethod = ManagedKafkaConnectGrpc.getCreateConnectClusterMethod) + == null) { + ManagedKafkaConnectGrpc.getCreateConnectClusterMethod = + getCreateConnectClusterMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "CreateConnectCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.CreateConnectClusterRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("CreateConnectCluster")) + .build(); + } + } + } + return getCreateConnectClusterMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest, + com.google.longrunning.Operation> + getUpdateConnectClusterMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateConnectCluster", + requestType = com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest, + com.google.longrunning.Operation> + getUpdateConnectClusterMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest, + com.google.longrunning.Operation> + getUpdateConnectClusterMethod; + if ((getUpdateConnectClusterMethod = ManagedKafkaConnectGrpc.getUpdateConnectClusterMethod) + == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getUpdateConnectClusterMethod = ManagedKafkaConnectGrpc.getUpdateConnectClusterMethod) + == null) { + ManagedKafkaConnectGrpc.getUpdateConnectClusterMethod = + getUpdateConnectClusterMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "UpdateConnectCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("UpdateConnectCluster")) + .build(); + } + } + } + return getUpdateConnectClusterMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest, + com.google.longrunning.Operation> + getDeleteConnectClusterMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteConnectCluster", + requestType = com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest, + com.google.longrunning.Operation> + getDeleteConnectClusterMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest, + com.google.longrunning.Operation> + getDeleteConnectClusterMethod; + if ((getDeleteConnectClusterMethod = ManagedKafkaConnectGrpc.getDeleteConnectClusterMethod) + == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getDeleteConnectClusterMethod = ManagedKafkaConnectGrpc.getDeleteConnectClusterMethod) + == null) { + ManagedKafkaConnectGrpc.getDeleteConnectClusterMethod = + getDeleteConnectClusterMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "DeleteConnectCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("DeleteConnectCluster")) + .build(); + } + } + } + return getDeleteConnectClusterMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.ListConnectorsRequest, + com.google.cloud.managedkafka.v1.ListConnectorsResponse> + getListConnectorsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListConnectors", + requestType = com.google.cloud.managedkafka.v1.ListConnectorsRequest.class, + responseType = com.google.cloud.managedkafka.v1.ListConnectorsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.ListConnectorsRequest, + com.google.cloud.managedkafka.v1.ListConnectorsResponse> + getListConnectorsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.ListConnectorsRequest, + com.google.cloud.managedkafka.v1.ListConnectorsResponse> + getListConnectorsMethod; + if ((getListConnectorsMethod = ManagedKafkaConnectGrpc.getListConnectorsMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getListConnectorsMethod = ManagedKafkaConnectGrpc.getListConnectorsMethod) == null) { + ManagedKafkaConnectGrpc.getListConnectorsMethod = + getListConnectorsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListConnectors")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.ListConnectorsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.ListConnectorsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("ListConnectors")) + .build(); + } + } + } + return getListConnectorsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.GetConnectorRequest, + com.google.cloud.managedkafka.v1.Connector> + getGetConnectorMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetConnector", + requestType = com.google.cloud.managedkafka.v1.GetConnectorRequest.class, + responseType = com.google.cloud.managedkafka.v1.Connector.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.GetConnectorRequest, + com.google.cloud.managedkafka.v1.Connector> + getGetConnectorMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.GetConnectorRequest, + com.google.cloud.managedkafka.v1.Connector> + getGetConnectorMethod; + if ((getGetConnectorMethod = ManagedKafkaConnectGrpc.getGetConnectorMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getGetConnectorMethod = ManagedKafkaConnectGrpc.getGetConnectorMethod) == null) { + ManagedKafkaConnectGrpc.getGetConnectorMethod = + getGetConnectorMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetConnector")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.GetConnectorRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.Connector.getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("GetConnector")) + .build(); + } + } + } + return getGetConnectorMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.CreateConnectorRequest, + com.google.cloud.managedkafka.v1.Connector> + getCreateConnectorMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateConnector", + requestType = com.google.cloud.managedkafka.v1.CreateConnectorRequest.class, + responseType = com.google.cloud.managedkafka.v1.Connector.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.CreateConnectorRequest, + com.google.cloud.managedkafka.v1.Connector> + getCreateConnectorMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.CreateConnectorRequest, + com.google.cloud.managedkafka.v1.Connector> + getCreateConnectorMethod; + if ((getCreateConnectorMethod = ManagedKafkaConnectGrpc.getCreateConnectorMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getCreateConnectorMethod = ManagedKafkaConnectGrpc.getCreateConnectorMethod) == null) { + ManagedKafkaConnectGrpc.getCreateConnectorMethod = + getCreateConnectorMethod = + 
io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateConnector")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.CreateConnectorRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.Connector.getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("CreateConnector")) + .build(); + } + } + } + return getCreateConnectorMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.UpdateConnectorRequest, + com.google.cloud.managedkafka.v1.Connector> + getUpdateConnectorMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateConnector", + requestType = com.google.cloud.managedkafka.v1.UpdateConnectorRequest.class, + responseType = com.google.cloud.managedkafka.v1.Connector.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.UpdateConnectorRequest, + com.google.cloud.managedkafka.v1.Connector> + getUpdateConnectorMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.UpdateConnectorRequest, + com.google.cloud.managedkafka.v1.Connector> + getUpdateConnectorMethod; + if ((getUpdateConnectorMethod = ManagedKafkaConnectGrpc.getUpdateConnectorMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getUpdateConnectorMethod = ManagedKafkaConnectGrpc.getUpdateConnectorMethod) == null) { + ManagedKafkaConnectGrpc.getUpdateConnectorMethod = + getUpdateConnectorMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateConnector")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.UpdateConnectorRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.Connector.getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("UpdateConnector")) + .build(); + } + } + } + return getUpdateConnectorMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.DeleteConnectorRequest, com.google.protobuf.Empty> + getDeleteConnectorMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteConnector", + requestType = com.google.cloud.managedkafka.v1.DeleteConnectorRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.DeleteConnectorRequest, com.google.protobuf.Empty> + getDeleteConnectorMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.DeleteConnectorRequest, com.google.protobuf.Empty> + getDeleteConnectorMethod; + if ((getDeleteConnectorMethod = ManagedKafkaConnectGrpc.getDeleteConnectorMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getDeleteConnectorMethod = ManagedKafkaConnectGrpc.getDeleteConnectorMethod) == null) { + ManagedKafkaConnectGrpc.getDeleteConnectorMethod = + getDeleteConnectorMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteConnector")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.DeleteConnectorRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("DeleteConnector")) + .build(); + } + } + } + return getDeleteConnectorMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.PauseConnectorRequest, + com.google.cloud.managedkafka.v1.PauseConnectorResponse> + getPauseConnectorMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "PauseConnector", + requestType = com.google.cloud.managedkafka.v1.PauseConnectorRequest.class, + responseType = com.google.cloud.managedkafka.v1.PauseConnectorResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.PauseConnectorRequest, + com.google.cloud.managedkafka.v1.PauseConnectorResponse> + getPauseConnectorMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.PauseConnectorRequest, + com.google.cloud.managedkafka.v1.PauseConnectorResponse> + getPauseConnectorMethod; + if ((getPauseConnectorMethod = ManagedKafkaConnectGrpc.getPauseConnectorMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getPauseConnectorMethod = ManagedKafkaConnectGrpc.getPauseConnectorMethod) == null) { + ManagedKafkaConnectGrpc.getPauseConnectorMethod = + getPauseConnectorMethod = + 
io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "PauseConnector")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.PauseConnectorRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.PauseConnectorResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("PauseConnector")) + .build(); + } + } + } + return getPauseConnectorMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.ResumeConnectorRequest, + com.google.cloud.managedkafka.v1.ResumeConnectorResponse> + getResumeConnectorMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ResumeConnector", + requestType = com.google.cloud.managedkafka.v1.ResumeConnectorRequest.class, + responseType = com.google.cloud.managedkafka.v1.ResumeConnectorResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.ResumeConnectorRequest, + com.google.cloud.managedkafka.v1.ResumeConnectorResponse> + getResumeConnectorMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.ResumeConnectorRequest, + com.google.cloud.managedkafka.v1.ResumeConnectorResponse> + getResumeConnectorMethod; + if ((getResumeConnectorMethod = ManagedKafkaConnectGrpc.getResumeConnectorMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getResumeConnectorMethod = ManagedKafkaConnectGrpc.getResumeConnectorMethod) == null) { + ManagedKafkaConnectGrpc.getResumeConnectorMethod = + getResumeConnectorMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ResumeConnector")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.ResumeConnectorRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.ResumeConnectorResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("ResumeConnector")) + .build(); + } + } + } + return getResumeConnectorMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.RestartConnectorRequest, + com.google.cloud.managedkafka.v1.RestartConnectorResponse> + getRestartConnectorMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "RestartConnector", + requestType = com.google.cloud.managedkafka.v1.RestartConnectorRequest.class, + responseType = com.google.cloud.managedkafka.v1.RestartConnectorResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.RestartConnectorRequest, + com.google.cloud.managedkafka.v1.RestartConnectorResponse> + getRestartConnectorMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.RestartConnectorRequest, + com.google.cloud.managedkafka.v1.RestartConnectorResponse> + getRestartConnectorMethod; + if ((getRestartConnectorMethod = ManagedKafkaConnectGrpc.getRestartConnectorMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getRestartConnectorMethod = ManagedKafkaConnectGrpc.getRestartConnectorMethod) + == null) { + ManagedKafkaConnectGrpc.getRestartConnectorMethod = + getRestartConnectorMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "RestartConnector")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.RestartConnectorRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.managedkafka.v1.RestartConnectorResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new ManagedKafkaConnectMethodDescriptorSupplier("RestartConnector")) + .build(); + } + } + } + return getRestartConnectorMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.StopConnectorRequest, + com.google.cloud.managedkafka.v1.StopConnectorResponse> + getStopConnectorMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "StopConnector", + requestType = com.google.cloud.managedkafka.v1.StopConnectorRequest.class, + responseType = com.google.cloud.managedkafka.v1.StopConnectorResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.StopConnectorRequest, + com.google.cloud.managedkafka.v1.StopConnectorResponse> + getStopConnectorMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.managedkafka.v1.StopConnectorRequest, + com.google.cloud.managedkafka.v1.StopConnectorResponse> + getStopConnectorMethod; + if ((getStopConnectorMethod = ManagedKafkaConnectGrpc.getStopConnectorMethod) == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + if ((getStopConnectorMethod = ManagedKafkaConnectGrpc.getStopConnectorMethod) == null) { + ManagedKafkaConnectGrpc.getStopConnectorMethod = + getStopConnectorMethod = + io.grpc.MethodDescriptor + . 
<com.google.cloud.managedkafka.v1.StopConnectorRequest,
+                          com.google.cloud.managedkafka.v1.StopConnectorResponse>
+                          newBuilder()
+                      .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
+                      .setFullMethodName(generateFullMethodName(SERVICE_NAME, "StopConnector"))
+                      .setSampledToLocalTracing(true)
+                      .setRequestMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.managedkafka.v1.StopConnectorRequest
+                                  .getDefaultInstance()))
+                      .setResponseMarshaller(
+                          io.grpc.protobuf.ProtoUtils.marshaller(
+                              com.google.cloud.managedkafka.v1.StopConnectorResponse
+                                  .getDefaultInstance()))
+                      .setSchemaDescriptor(
+                          new ManagedKafkaConnectMethodDescriptorSupplier("StopConnector"))
+                      .build();
+        }
+      }
+    }
+    return getStopConnectorMethod;
+  }
+
+  /** Creates a new async stub that supports all call types for the service */
+  public static ManagedKafkaConnectStub newStub(io.grpc.Channel channel) {
+    io.grpc.stub.AbstractStub.StubFactory<ManagedKafkaConnectStub> factory =
+        new io.grpc.stub.AbstractStub.StubFactory<ManagedKafkaConnectStub>() {
+          @java.lang.Override
+          public ManagedKafkaConnectStub newStub(
+              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+            return new ManagedKafkaConnectStub(channel, callOptions);
+          }
+        };
+    return ManagedKafkaConnectStub.newStub(factory, channel);
+  }
+
+  /**
+   * Creates a new blocking-style stub that supports unary and streaming output calls on the service
+   */
+  public static ManagedKafkaConnectBlockingStub newBlockingStub(io.grpc.Channel channel) {
+    io.grpc.stub.AbstractStub.StubFactory<ManagedKafkaConnectBlockingStub> factory =
+        new io.grpc.stub.AbstractStub.StubFactory<ManagedKafkaConnectBlockingStub>() {
+          @java.lang.Override
+          public ManagedKafkaConnectBlockingStub newStub(
+              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+            return new ManagedKafkaConnectBlockingStub(channel, callOptions);
+          }
+        };
+    return ManagedKafkaConnectBlockingStub.newStub(factory, channel);
+  }
+
+  /** Creates a new ListenableFuture-style stub that supports unary calls on the service */
+  public static ManagedKafkaConnectFutureStub newFutureStub(io.grpc.Channel channel) {
+    io.grpc.stub.AbstractStub.StubFactory<ManagedKafkaConnectFutureStub> factory =
+        new io.grpc.stub.AbstractStub.StubFactory<ManagedKafkaConnectFutureStub>() {
+          @java.lang.Override
+          public ManagedKafkaConnectFutureStub newStub(
+              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+            return new ManagedKafkaConnectFutureStub(channel, callOptions);
+          }
+        };
+    return ManagedKafkaConnectFutureStub.newStub(factory, channel);
+  }
+
+  /**
+   *
+   *
+   * <pre>
+   * The service that a client application uses to manage Apache Kafka Connect
+   * clusters and connectors.
+   * </pre>
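+   *
+   * <p>Every method has a default body that responds with {@code UNIMPLEMENTED}, so an
+   * implementation only needs to override the RPCs it actually serves; servers typically extend
+   * {@code ManagedKafkaConnectImplBase} below rather than implementing this interface directly.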
+ */ + public interface AsyncService { + + /** + * + * + *
+     * Lists the Kafka Connect clusters in a given project and location.
+     * 
+ */ + default void listConnectClusters( + com.google.cloud.managedkafka.v1.ListConnectClustersRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListConnectClustersMethod(), responseObserver); + } + + /** + * + * + *
+     * Returns the properties of a single Kafka Connect cluster.
+     * 
+ */ + default void getConnectCluster( + com.google.cloud.managedkafka.v1.GetConnectClusterRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetConnectClusterMethod(), responseObserver); + } + + /** + * + * + *
+     * Creates a new Kafka Connect cluster in a given project and location.
+     * 
+ */ + default void createConnectCluster( + com.google.cloud.managedkafka.v1.CreateConnectClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateConnectClusterMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates the properties of a single Kafka Connect cluster.
+     * 
+ */ + default void updateConnectCluster( + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateConnectClusterMethod(), responseObserver); + } + + /** + * + * + *
+     * Deletes a single Connect cluster.
+     * 
+ */ + default void deleteConnectCluster( + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteConnectClusterMethod(), responseObserver); + } + + /** + * + * + *
+     * Lists the connectors in a given Connect cluster.
+     * 
+ */ + default void listConnectors( + com.google.cloud.managedkafka.v1.ListConnectorsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListConnectorsMethod(), responseObserver); + } + + /** + * + * + *
+     * Returns the properties of a single connector.
+     * 
+ */ + default void getConnector( + com.google.cloud.managedkafka.v1.GetConnectorRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetConnectorMethod(), responseObserver); + } + + /** + * + * + *
+     * Creates a new connector in a given Connect cluster.
+     * 
+ */ + default void createConnector( + com.google.cloud.managedkafka.v1.CreateConnectorRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateConnectorMethod(), responseObserver); + } + + /** + * + * + *
+     * Updates the properties of a connector.
+     * 
+ */ + default void updateConnector( + com.google.cloud.managedkafka.v1.UpdateConnectorRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateConnectorMethod(), responseObserver); + } + + /** + * + * + *
+     * Deletes a connector.
+     * 
+ */ + default void deleteConnector( + com.google.cloud.managedkafka.v1.DeleteConnectorRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteConnectorMethod(), responseObserver); + } + + /** + * + * + *
+     * Pauses the connector and its tasks.
+     * 
+ */ + default void pauseConnector( + com.google.cloud.managedkafka.v1.PauseConnectorRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getPauseConnectorMethod(), responseObserver); + } + + /** + * + * + *
+     * Resumes the connector and its tasks.
+     * 
+ */ + default void resumeConnector( + com.google.cloud.managedkafka.v1.ResumeConnectorRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getResumeConnectorMethod(), responseObserver); + } + + /** + * + * + *
+     * Restarts the connector.
+     * 
+ */ + default void restartConnector( + com.google.cloud.managedkafka.v1.RestartConnectorRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getRestartConnectorMethod(), responseObserver); + } + + /** + * + * + *
+     * Stops the connector.
+     * 
+ */ + default void stopConnector( + com.google.cloud.managedkafka.v1.StopConnectorRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getStopConnectorMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service ManagedKafkaConnect. + * + *
+   * The service that a client application uses to manage Apache Kafka Connect
+   * clusters and connectors.
+   * 
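+   *
+   * <p>A hypothetical wiring sketch (the port and the single overridden RPC are illustrative
+   * assumptions, not part of the generated code):
+   *
+   * <pre>{@code
+   * Server server =
+   *     ServerBuilder.forPort(8080)
+   *         .addService(
+   *             new ManagedKafkaConnectImplBase() {
+   *               public void getConnector(
+   *                   GetConnectorRequest request, StreamObserver<Connector> responseObserver) {
+   *                 responseObserver.onNext(Connector.getDefaultInstance());
+   *                 responseObserver.onCompleted();
+   *               }
+   *             })
+   *         .build()
+   *         .start();
+   * }</pre>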
+ */ + public abstract static class ManagedKafkaConnectImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return ManagedKafkaConnectGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service ManagedKafkaConnect. + * + *
+   * The service that a client application uses to manage Apache Kafka Connect
+   * clusters and connectors.
+   * 
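+   *
+   * <p>A hedged example of a non-blocking call; the request construction and observer body are
+   * illustrative assumptions:
+   *
+   * <pre>{@code
+   * ManagedKafkaConnectGrpc.newStub(channel)
+   *     .getConnectCluster(
+   *         GetConnectClusterRequest.newBuilder().setName(name).build(),
+   *         new StreamObserver<ConnectCluster>() {
+   *           public void onNext(ConnectCluster cluster) {
+   *             // consume the returned cluster
+   *           }
+   *
+   *           public void onError(Throwable t) {
+   *             // surface or retry the failure
+   *           }
+   *
+   *           public void onCompleted() {}
+   *         });
+   * }</pre>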
+ */ + public static final class ManagedKafkaConnectStub + extends io.grpc.stub.AbstractAsyncStub { + private ManagedKafkaConnectStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected ManagedKafkaConnectStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new ManagedKafkaConnectStub(channel, callOptions); + } + + /** + * + * + *
+     * Lists the Kafka Connect clusters in a given project and location.
+     * 
+ */ + public void listConnectClusters( + com.google.cloud.managedkafka.v1.ListConnectClustersRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListConnectClustersMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Returns the properties of a single Kafka Connect cluster.
+     * 
+ */ + public void getConnectCluster( + com.google.cloud.managedkafka.v1.GetConnectClusterRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetConnectClusterMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Creates a new Kafka Connect cluster in a given project and location.
+     * 
+ */ + public void createConnectCluster( + com.google.cloud.managedkafka.v1.CreateConnectClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateConnectClusterMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates the properties of a single Kafka Connect cluster.
+     * 
+ */ + public void updateConnectCluster( + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateConnectClusterMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Deletes a single Connect cluster.
+     * 
+ */ + public void deleteConnectCluster( + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteConnectClusterMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Lists the connectors in a given Connect cluster.
+     * 
+ */ + public void listConnectors( + com.google.cloud.managedkafka.v1.ListConnectorsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListConnectorsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Returns the properties of a single connector.
+     * 
+ */ + public void getConnector( + com.google.cloud.managedkafka.v1.GetConnectorRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetConnectorMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Creates a new connector in a given Connect cluster.
+     * 
+ */ + public void createConnector( + com.google.cloud.managedkafka.v1.CreateConnectorRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateConnectorMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Updates the properties of a connector.
+     * 
+ */ + public void updateConnector( + com.google.cloud.managedkafka.v1.UpdateConnectorRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateConnectorMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Deletes a connector.
+     * 
+ */ + public void deleteConnector( + com.google.cloud.managedkafka.v1.DeleteConnectorRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteConnectorMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Pauses the connector and its tasks.
+     * 
+ */ + public void pauseConnector( + com.google.cloud.managedkafka.v1.PauseConnectorRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getPauseConnectorMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Resumes the connector and its tasks.
+     * 
+ */ + public void resumeConnector( + com.google.cloud.managedkafka.v1.ResumeConnectorRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getResumeConnectorMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Restarts the connector.
+     * 
+ */ + public void restartConnector( + com.google.cloud.managedkafka.v1.RestartConnectorRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRestartConnectorMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Stops the connector.
+     * 
+ */ + public void stopConnector( + com.google.cloud.managedkafka.v1.StopConnectorRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getStopConnectorMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service ManagedKafkaConnect. + * + *
+   * The service that a client application uses to manage Apache Kafka Connect
+   * clusters and connectors.
+   * 
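+   *
+   * <p>A minimal synchronous sketch; the parent value is an illustrative placeholder:
+   *
+   * <pre>{@code
+   * ListConnectClustersResponse response =
+   *     ManagedKafkaConnectGrpc.newBlockingStub(channel)
+   *         .listConnectClusters(
+   *             ListConnectClustersRequest.newBuilder()
+   *                 .setParent("projects/my-project/locations/us-central1")
+   *                 .build());
+   * }</pre>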
+ */ + public static final class ManagedKafkaConnectBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private ManagedKafkaConnectBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected ManagedKafkaConnectBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new ManagedKafkaConnectBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Lists the Kafka Connect clusters in a given project and location.
+     * 
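+     *
+     * <p>Results may span multiple pages; by the usual AIP-158 list conventions (an assumption
+     * here, since the request and response messages are defined elsewhere), pass the response's
+     * {@code next_page_token} back as {@code page_token} to fetch the next page.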
+ */ + public com.google.cloud.managedkafka.v1.ListConnectClustersResponse listConnectClusters( + com.google.cloud.managedkafka.v1.ListConnectClustersRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListConnectClustersMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the properties of a single Kafka Connect cluster.
+     * 
+ */ + public com.google.cloud.managedkafka.v1.ConnectCluster getConnectCluster( + com.google.cloud.managedkafka.v1.GetConnectClusterRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetConnectClusterMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates a new Kafka Connect cluster in a given project and location.
+     * 
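+     *
+     * <p>The call returns a {@link com.google.longrunning.Operation} rather than the finished
+     * cluster; callers poll the operation (for example through an Operations client) until
+     * {@code done} is set.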
+ */ + public com.google.longrunning.Operation createConnectCluster( + com.google.cloud.managedkafka.v1.CreateConnectClusterRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateConnectClusterMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates the properties of a single Kafka Connect cluster.
+     * 
+ */ + public com.google.longrunning.Operation updateConnectCluster( + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateConnectClusterMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes a single Connect cluster.
+     * 
+ */ + public com.google.longrunning.Operation deleteConnectCluster( + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteConnectClusterMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Lists the connectors in a given Connect cluster.
+     * 
+ */ + public com.google.cloud.managedkafka.v1.ListConnectorsResponse listConnectors( + com.google.cloud.managedkafka.v1.ListConnectorsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListConnectorsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Returns the properties of a single connector.
+     * 
+ */ + public com.google.cloud.managedkafka.v1.Connector getConnector( + com.google.cloud.managedkafka.v1.GetConnectorRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetConnectorMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Creates a new connector in a given Connect cluster.
+     * 
+ */ + public com.google.cloud.managedkafka.v1.Connector createConnector( + com.google.cloud.managedkafka.v1.CreateConnectorRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateConnectorMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Updates the properties of a connector.
+     * 
+ */ + public com.google.cloud.managedkafka.v1.Connector updateConnector( + com.google.cloud.managedkafka.v1.UpdateConnectorRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateConnectorMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Deletes a connector.
+     * 
+ */ + public com.google.protobuf.Empty deleteConnector( + com.google.cloud.managedkafka.v1.DeleteConnectorRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteConnectorMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Pauses the connector and its tasks.
+     * 
+ */ + public com.google.cloud.managedkafka.v1.PauseConnectorResponse pauseConnector( + com.google.cloud.managedkafka.v1.PauseConnectorRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPauseConnectorMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Resumes the connector and its tasks.
+     * 
+ */ + public com.google.cloud.managedkafka.v1.ResumeConnectorResponse resumeConnector( + com.google.cloud.managedkafka.v1.ResumeConnectorRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getResumeConnectorMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Restarts the connector.
+     * 
+ */ + public com.google.cloud.managedkafka.v1.RestartConnectorResponse restartConnector( + com.google.cloud.managedkafka.v1.RestartConnectorRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRestartConnectorMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Stops the connector.
+     * 
+ */ + public com.google.cloud.managedkafka.v1.StopConnectorResponse stopConnector( + com.google.cloud.managedkafka.v1.StopConnectorRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getStopConnectorMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service ManagedKafkaConnect. + * + *
+   * The service that a client application uses to manage Apache Kafka Connect
+   * clusters and connectors.
+   * 
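+   *
+   * <p>A hedged example; blocking on {@code get()} is shown only for brevity, and the request
+   * value is an illustrative assumption:
+   *
+   * <pre>{@code
+   * ListenableFuture<Connector> future =
+   *     ManagedKafkaConnectGrpc.newFutureStub(channel)
+   *         .getConnector(GetConnectorRequest.newBuilder().setName(name).build());
+   * Connector connector = future.get();
+   * }</pre>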
+ */ + public static final class ManagedKafkaConnectFutureStub + extends io.grpc.stub.AbstractFutureStub { + private ManagedKafkaConnectFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected ManagedKafkaConnectFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new ManagedKafkaConnectFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Lists the Kafka Connect clusters in a given project and location.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.managedkafka.v1.ListConnectClustersResponse> + listConnectClusters(com.google.cloud.managedkafka.v1.ListConnectClustersRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListConnectClustersMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Returns the properties of a single Kafka Connect cluster.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.managedkafka.v1.ConnectCluster> + getConnectCluster(com.google.cloud.managedkafka.v1.GetConnectClusterRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetConnectClusterMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Creates a new Kafka Connect cluster in a given project and location.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + createConnectCluster(com.google.cloud.managedkafka.v1.CreateConnectClusterRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateConnectClusterMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Updates the properties of a single Kafka Connect cluster.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + updateConnectCluster(com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateConnectClusterMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Deletes a single Connect cluster.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + deleteConnectCluster(com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteConnectClusterMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Lists the connectors in a given Connect cluster.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.managedkafka.v1.ListConnectorsResponse> + listConnectors(com.google.cloud.managedkafka.v1.ListConnectorsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListConnectorsMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Returns the properties of a single connector.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.managedkafka.v1.Connector> + getConnector(com.google.cloud.managedkafka.v1.GetConnectorRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetConnectorMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Creates a new connector in a given Connect cluster.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.managedkafka.v1.Connector> + createConnector(com.google.cloud.managedkafka.v1.CreateConnectorRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateConnectorMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Updates the properties of a connector.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.managedkafka.v1.Connector> + updateConnector(com.google.cloud.managedkafka.v1.UpdateConnectorRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateConnectorMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Deletes a connector.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture + deleteConnector(com.google.cloud.managedkafka.v1.DeleteConnectorRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteConnectorMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Pauses the connector and its tasks.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.managedkafka.v1.PauseConnectorResponse> + pauseConnector(com.google.cloud.managedkafka.v1.PauseConnectorRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getPauseConnectorMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Resumes the connector and its tasks.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.managedkafka.v1.ResumeConnectorResponse> + resumeConnector(com.google.cloud.managedkafka.v1.ResumeConnectorRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getResumeConnectorMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Restarts the connector.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.managedkafka.v1.RestartConnectorResponse> + restartConnector(com.google.cloud.managedkafka.v1.RestartConnectorRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getRestartConnectorMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Stops the connector.
+     * 
+     */
+    public com.google.common.util.concurrent.ListenableFuture<
+            com.google.cloud.managedkafka.v1.StopConnectorResponse>
+        stopConnector(com.google.cloud.managedkafka.v1.StopConnectorRequest request) {
+      return io.grpc.stub.ClientCalls.futureUnaryCall(
+          getChannel().newCall(getStopConnectorMethod(), getCallOptions()), request);
+    }
+  }
+
+  private static final int METHODID_LIST_CONNECT_CLUSTERS = 0;
+  private static final int METHODID_GET_CONNECT_CLUSTER = 1;
+  private static final int METHODID_CREATE_CONNECT_CLUSTER = 2;
+  private static final int METHODID_UPDATE_CONNECT_CLUSTER = 3;
+  private static final int METHODID_DELETE_CONNECT_CLUSTER = 4;
+  private static final int METHODID_LIST_CONNECTORS = 5;
+  private static final int METHODID_GET_CONNECTOR = 6;
+  private static final int METHODID_CREATE_CONNECTOR = 7;
+  private static final int METHODID_UPDATE_CONNECTOR = 8;
+  private static final int METHODID_DELETE_CONNECTOR = 9;
+  private static final int METHODID_PAUSE_CONNECTOR = 10;
+  private static final int METHODID_RESUME_CONNECTOR = 11;
+  private static final int METHODID_RESTART_CONNECTOR = 12;
+  private static final int METHODID_STOP_CONNECTOR = 13;
+
+  private static final class MethodHandlers<Req, Resp>
+      implements io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
+          io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
+          io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
+          io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
+    private final AsyncService serviceImpl;
+    private final int methodId;
+
+    MethodHandlers(AsyncService serviceImpl, int methodId) {
+      this.serviceImpl = serviceImpl;
+      this.methodId = methodId;
+    }
+
+    @java.lang.Override
+    @java.lang.SuppressWarnings("unchecked")
+    public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
+      switch (methodId) {
+        case METHODID_LIST_CONNECT_CLUSTERS:
+          serviceImpl.listConnectClusters(
+              (com.google.cloud.managedkafka.v1.ListConnectClustersRequest) request,
+              (io.grpc.stub.StreamObserver<
+                      com.google.cloud.managedkafka.v1.ListConnectClustersResponse>)
+                  responseObserver);
+          break;
+        case METHODID_GET_CONNECT_CLUSTER:
+          serviceImpl.getConnectCluster(
+              (com.google.cloud.managedkafka.v1.GetConnectClusterRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.managedkafka.v1.ConnectCluster>)
+                  responseObserver);
+          break;
+        case METHODID_CREATE_CONNECT_CLUSTER:
+          serviceImpl.createConnectCluster(
+              (com.google.cloud.managedkafka.v1.CreateConnectClusterRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.longrunning.Operation>) responseObserver);
+          break;
+        case METHODID_UPDATE_CONNECT_CLUSTER:
+          serviceImpl.updateConnectCluster(
+              (com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.longrunning.Operation>) responseObserver);
+          break;
+        case METHODID_DELETE_CONNECT_CLUSTER:
+          serviceImpl.deleteConnectCluster(
+              (com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.longrunning.Operation>) responseObserver);
+          break;
+        case METHODID_LIST_CONNECTORS:
+          serviceImpl.listConnectors(
+              (com.google.cloud.managedkafka.v1.ListConnectorsRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.managedkafka.v1.ListConnectorsResponse>)
+                  responseObserver);
+          break;
+        case METHODID_GET_CONNECTOR:
+          serviceImpl.getConnector(
+              (com.google.cloud.managedkafka.v1.GetConnectorRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.managedkafka.v1.Connector>)
+                  responseObserver);
+          break;
+        case METHODID_CREATE_CONNECTOR:
+          serviceImpl.createConnector(
+              (com.google.cloud.managedkafka.v1.CreateConnectorRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.managedkafka.v1.Connector>)
+                  responseObserver);
+          break;
+        case METHODID_UPDATE_CONNECTOR:
+          serviceImpl.updateConnector(
+              (com.google.cloud.managedkafka.v1.UpdateConnectorRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.managedkafka.v1.Connector>)
+                  responseObserver);
+          break;
+        case METHODID_DELETE_CONNECTOR:
+          serviceImpl.deleteConnector(
+              (com.google.cloud.managedkafka.v1.DeleteConnectorRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.protobuf.Empty>) responseObserver);
+          break;
+        case METHODID_PAUSE_CONNECTOR:
+          serviceImpl.pauseConnector(
+              (com.google.cloud.managedkafka.v1.PauseConnectorRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.managedkafka.v1.PauseConnectorResponse>)
+                  responseObserver);
+          break;
+        case METHODID_RESUME_CONNECTOR:
+          serviceImpl.resumeConnector(
+              (com.google.cloud.managedkafka.v1.ResumeConnectorRequest) request,
+              (io.grpc.stub.StreamObserver<
+                      com.google.cloud.managedkafka.v1.ResumeConnectorResponse>)
+                  responseObserver);
+          break;
+        case METHODID_RESTART_CONNECTOR:
+          serviceImpl.restartConnector(
+              (com.google.cloud.managedkafka.v1.RestartConnectorRequest) request,
+              (io.grpc.stub.StreamObserver<
+                      com.google.cloud.managedkafka.v1.RestartConnectorResponse>)
+                  responseObserver);
+          break;
+        case METHODID_STOP_CONNECTOR:
+          serviceImpl.stopConnector(
+              (com.google.cloud.managedkafka.v1.StopConnectorRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.managedkafka.v1.StopConnectorResponse>)
+                  responseObserver);
+          break;
+        default:
+          throw new AssertionError();
+      }
+    }
+
+    @java.lang.Override
+    @java.lang.SuppressWarnings("unchecked")
+    public io.grpc.stub.StreamObserver<Req> invoke(
+        io.grpc.stub.StreamObserver<Resp> responseObserver) {
+      switch (methodId) {
+        default:
+          throw new AssertionError();
+      }
+    }
+  }
+
+  public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) {
+    return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
+        .addMethod(
+            getListConnectClustersMethod(),
+            io.grpc.stub.ServerCalls.asyncUnaryCall(
+                new MethodHandlers<
+                    com.google.cloud.managedkafka.v1.ListConnectClustersRequest,
+                    com.google.cloud.managedkafka.v1.ListConnectClustersResponse>(
+                    service, METHODID_LIST_CONNECT_CLUSTERS)))
+        .addMethod(
+            getGetConnectClusterMethod(),
+            io.grpc.stub.ServerCalls.asyncUnaryCall(
+                new MethodHandlers<
+                    com.google.cloud.managedkafka.v1.GetConnectClusterRequest,
+                    com.google.cloud.managedkafka.v1.ConnectCluster>(
+                    service, METHODID_GET_CONNECT_CLUSTER)))
+        .addMethod(
+            getCreateConnectClusterMethod(),
+            io.grpc.stub.ServerCalls.asyncUnaryCall(
+                new MethodHandlers<
+                    com.google.cloud.managedkafka.v1.CreateConnectClusterRequest,
+                    com.google.longrunning.Operation>(service, METHODID_CREATE_CONNECT_CLUSTER)))
+        .addMethod(
+            getUpdateConnectClusterMethod(),
+            io.grpc.stub.ServerCalls.asyncUnaryCall(
+                new MethodHandlers<
+                    com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest,
+                    com.google.longrunning.Operation>(service, METHODID_UPDATE_CONNECT_CLUSTER)))
+        .addMethod(
+            getDeleteConnectClusterMethod(),
+            io.grpc.stub.ServerCalls.asyncUnaryCall(
+                new MethodHandlers<
+                    com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest,
+                    com.google.longrunning.Operation>(service, METHODID_DELETE_CONNECT_CLUSTER)))
+        .addMethod(
+            getListConnectorsMethod(),
+            io.grpc.stub.ServerCalls.asyncUnaryCall(
+                new MethodHandlers<
+                    com.google.cloud.managedkafka.v1.ListConnectorsRequest,
+                    com.google.cloud.managedkafka.v1.ListConnectorsResponse>(
+                    service, METHODID_LIST_CONNECTORS)))
+        .addMethod(
+            getGetConnectorMethod(),
+            io.grpc.stub.ServerCalls.asyncUnaryCall(
+                new MethodHandlers<
+                    com.google.cloud.managedkafka.v1.GetConnectorRequest,
+                    com.google.cloud.managedkafka.v1.Connector>(service, METHODID_GET_CONNECTOR)))
+        .addMethod(
+            getCreateConnectorMethod(),
+ io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.managedkafka.v1.CreateConnectorRequest, + com.google.cloud.managedkafka.v1.Connector>( + service, METHODID_CREATE_CONNECTOR))) + .addMethod( + getUpdateConnectorMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.managedkafka.v1.UpdateConnectorRequest, + com.google.cloud.managedkafka.v1.Connector>( + service, METHODID_UPDATE_CONNECTOR))) + .addMethod( + getDeleteConnectorMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.managedkafka.v1.DeleteConnectorRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_CONNECTOR))) + .addMethod( + getPauseConnectorMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.managedkafka.v1.PauseConnectorRequest, + com.google.cloud.managedkafka.v1.PauseConnectorResponse>( + service, METHODID_PAUSE_CONNECTOR))) + .addMethod( + getResumeConnectorMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.managedkafka.v1.ResumeConnectorRequest, + com.google.cloud.managedkafka.v1.ResumeConnectorResponse>( + service, METHODID_RESUME_CONNECTOR))) + .addMethod( + getRestartConnectorMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.managedkafka.v1.RestartConnectorRequest, + com.google.cloud.managedkafka.v1.RestartConnectorResponse>( + service, METHODID_RESTART_CONNECTOR))) + .addMethod( + getStopConnectorMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.cloud.managedkafka.v1.StopConnectorRequest, + com.google.cloud.managedkafka.v1.StopConnectorResponse>( + service, METHODID_STOP_CONNECTOR))) + .build(); + } + + private abstract static class ManagedKafkaConnectBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + ManagedKafkaConnectBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("ManagedKafkaConnect"); + } + } + + private static final class ManagedKafkaConnectFileDescriptorSupplier + extends ManagedKafkaConnectBaseDescriptorSupplier { + ManagedKafkaConnectFileDescriptorSupplier() {} + } + + private static final class ManagedKafkaConnectMethodDescriptorSupplier + extends ManagedKafkaConnectBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + ManagedKafkaConnectMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (ManagedKafkaConnectGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new 
ManagedKafkaConnectFileDescriptorSupplier()) + .addMethod(getListConnectClustersMethod()) + .addMethod(getGetConnectClusterMethod()) + .addMethod(getCreateConnectClusterMethod()) + .addMethod(getUpdateConnectClusterMethod()) + .addMethod(getDeleteConnectClusterMethod()) + .addMethod(getListConnectorsMethod()) + .addMethod(getGetConnectorMethod()) + .addMethod(getCreateConnectorMethod()) + .addMethod(getUpdateConnectorMethod()) + .addMethod(getDeleteConnectorMethod()) + .addMethod(getPauseConnectorMethod()) + .addMethod(getResumeConnectorMethod()) + .addMethod(getRestartConnectorMethod()) + .addMethod(getStopConnectorMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-managedkafka/pom.xml b/java-managedkafka/pom.xml index 8bb13fb25651..799600704224 100644 --- a/java-managedkafka/pom.xml +++ b/java-managedkafka/pom.xml @@ -4,7 +4,7 @@ com.google.cloud google-cloud-managedkafka-parent pom - 0.16.0 + 0.16.1 Google Managed Service for Apache Kafka API Parent Java idiomatic client for Google Cloud Platform services. @@ -13,7 +13,7 @@ com.google.cloud google-cloud-jar-parent - 1.54.0 + 1.54.2 ../google-cloud-jar-parent/pom.xml @@ -29,17 +29,17 @@ com.google.cloud google-cloud-managedkafka - 0.16.0 + 0.16.1 com.google.api.grpc grpc-google-cloud-managedkafka-v1 - 0.16.0 + 0.16.1 com.google.api.grpc proto-google-cloud-managedkafka-v1 - 0.16.0 + 0.16.1
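Reviewer note: the `ManagedKafkaConnectGrpc` plumbing added above follows the standard grpc-java unary dispatch pattern: `bindService(AsyncService)` registers one `MethodHandlers` instance per RPC, keyed by a `METHODID_*` constant, and each handler casts the request and response observer to the concrete protobuf types. A minimal sketch of exercising the new surface against an in-process server (illustrative only, not part of this change; it assumes `io.grpc:grpc-inprocess` is on the classpath, uses the standard generated `newBlockingStub` factory, and the project, location, and cluster names are placeholders):

```java
import com.google.cloud.managedkafka.v1.ConnectCluster;
import com.google.cloud.managedkafka.v1.GetConnectClusterRequest;
import com.google.cloud.managedkafka.v1.ManagedKafkaConnectGrpc;
import io.grpc.ManagedChannel;
import io.grpc.Server;
import io.grpc.inprocess.InProcessChannelBuilder;
import io.grpc.inprocess.InProcessServerBuilder;
import io.grpc.stub.StreamObserver;

public class ManagedKafkaConnectBindingSketch {

  public static void main(String[] args) throws Exception {
    // Fake backend: override a single RPC; every other method keeps the
    // UNIMPLEMENTED default inherited from AsyncService.
    ManagedKafkaConnectGrpc.AsyncService service =
        new ManagedKafkaConnectGrpc.AsyncService() {
          @Override
          public void getConnectCluster(
              GetConnectClusterRequest request, StreamObserver<ConnectCluster> responseObserver) {
            responseObserver.onNext(
                ConnectCluster.newBuilder()
                    .setName(request.getName())
                    .setState(ConnectCluster.State.ACTIVE)
                    .build());
            responseObserver.onCompleted();
          }
        };

    String serverName = InProcessServerBuilder.generateName();
    Server server =
        InProcessServerBuilder.forName(serverName)
            .directExecutor()
            .addService(ManagedKafkaConnectGrpc.bindService(service)) // added in this release
            .build()
            .start();
    ManagedChannel channel =
        InProcessChannelBuilder.forName(serverName).directExecutor().build();
    try {
      ConnectCluster cluster =
          ManagedKafkaConnectGrpc.newBlockingStub(channel)
              .getConnectCluster(
                  GetConnectClusterRequest.newBuilder()
                      .setName("projects/my-project/locations/us-central1/connectClusters/my-cc")
                      .build());
      System.out.println(cluster.getState()); // ACTIVE
    } finally {
      channel.shutdownNow();
      server.shutdownNow();
    }
  }
}
```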
diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/pom.xml b/java-managedkafka/proto-google-cloud-managedkafka-v1/pom.xml index d0d46a2ba5c4..9fb338ea11af 100644 --- a/java-managedkafka/proto-google-cloud-managedkafka-v1/pom.xml +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-managedkafka-v1 - 0.16.0 + 0.16.1 proto-google-cloud-managedkafka-v1 Proto library for google-cloud-managedkafka com.google.cloud google-cloud-managedkafka-parent - 0.16.0 + 0.16.1 diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectAccessConfig.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectAccessConfig.java new file mode 100644 index 000000000000..7bf0a4af7bbb --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectAccessConfig.java @@ -0,0 +1,1069 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * The configuration of access to the Kafka Connect cluster.
+ * 
+ *
+ * Protobuf type {@code google.cloud.managedkafka.v1.ConnectAccessConfig}
+ */
+public final class ConnectAccessConfig extends com.google.protobuf.GeneratedMessageV3
+    implements
+    // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ConnectAccessConfig)
+    ConnectAccessConfigOrBuilder {
+  private static final long serialVersionUID = 0L;
+  // Use ConnectAccessConfig.newBuilder() to construct.
+  private ConnectAccessConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+
+  private ConnectAccessConfig() {
+    networkConfigs_ = java.util.Collections.emptyList();
+  }
+
+  @java.lang.Override
+  @SuppressWarnings({"unused"})
+  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+    return new ConnectAccessConfig();
+  }
+
+  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+    return com.google.cloud.managedkafka.v1.ResourcesProto
+        .internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_descriptor;
+  }
+
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.managedkafka.v1.ResourcesProto
+        .internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.managedkafka.v1.ConnectAccessConfig.class,
+            com.google.cloud.managedkafka.v1.ConnectAccessConfig.Builder.class);
+  }
+
+  public static final int NETWORK_CONFIGS_FIELD_NUMBER = 1;
+
+  @SuppressWarnings("serial")
+  private java.util.List<com.google.cloud.managedkafka.v1.ConnectNetworkConfig> networkConfigs_;
+  /**
+   *
+   *
+   * <pre>
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
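+   *
+   * Illustrative only (editor's sketch, not emitted by protoc): callers populate
+   * this repeated field through the generated builder, where
+   * someConnectNetworkConfig is a placeholder for a prebuilt ConnectNetworkConfig:
+   *   ConnectAccessConfig access =
+   *       ConnectAccessConfig.newBuilder()
+   *           .addNetworkConfigs(someConnectNetworkConfig) // between 1 and 10 entries
+   *           .build();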
+   * 
+   *
+   * <code>
+   * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED];
+   * </code>
+   */
+  @java.lang.Override
+  public java.util.List<com.google.cloud.managedkafka.v1.ConnectNetworkConfig>
+      getNetworkConfigsList() {
+    return networkConfigs_;
+  }
+  /**
+   *
+   *
+   * <pre>
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
+   * 
+   *
+   * <code>
+   * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED];
+   * </code>
+   */
+  @java.lang.Override
+  public java.util.List<? extends com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder>
+      getNetworkConfigsOrBuilderList() {
+    return networkConfigs_;
+  }
+  /**
+   *
+   *
+   * <pre>
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
+   * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getNetworkConfigsCount() { + return networkConfigs_.size(); + } + /** + * + * + *
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
+   * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectNetworkConfig getNetworkConfigs(int index) { + return networkConfigs_.get(index); + } + /** + * + * + *
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
+   * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder getNetworkConfigsOrBuilder( + int index) { + return networkConfigs_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < networkConfigs_.size(); i++) { + output.writeMessage(1, networkConfigs_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < networkConfigs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, networkConfigs_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.ConnectAccessConfig)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.ConnectAccessConfig other = + (com.google.cloud.managedkafka.v1.ConnectAccessConfig) obj; + + if (!getNetworkConfigsList().equals(other.getNetworkConfigsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getNetworkConfigsCount() > 0) { + hash = (37 * hash) + NETWORK_CONFIGS_FIELD_NUMBER; + hash = (53 * hash) + getNetworkConfigsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectAccessConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.managedkafka.v1.ConnectAccessConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * The configuration of access to the Kafka Connect cluster.
+   * 
+   *
+   * Protobuf type {@code google.cloud.managedkafka.v1.ConnectAccessConfig}
+   */
+  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
+      implements
+      // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ConnectAccessConfig)
+      com.google.cloud.managedkafka.v1.ConnectAccessConfigOrBuilder {
+    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+      return com.google.cloud.managedkafka.v1.ResourcesProto
+          .internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_descriptor;
+    }
+
+    @java.lang.Override
+    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return com.google.cloud.managedkafka.v1.ResourcesProto
+          .internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              com.google.cloud.managedkafka.v1.ConnectAccessConfig.class,
+              com.google.cloud.managedkafka.v1.ConnectAccessConfig.Builder.class);
+    }
+
+    // Construct using com.google.cloud.managedkafka.v1.ConnectAccessConfig.newBuilder()
+    private Builder() {}
+
+    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      super(parent);
+    }
+
+    @java.lang.Override
+    public Builder clear() {
+      super.clear();
+      bitField0_ = 0;
+      if (networkConfigsBuilder_ == null) {
+        networkConfigs_ = java.util.Collections.emptyList();
+      } else {
+        networkConfigs_ = null;
+        networkConfigsBuilder_.clear();
+      }
+      bitField0_ = (bitField0_ & ~0x00000001);
+      return this;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+      return com.google.cloud.managedkafka.v1.ResourcesProto
+          .internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_descriptor;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.managedkafka.v1.ConnectAccessConfig getDefaultInstanceForType() {
+      return com.google.cloud.managedkafka.v1.ConnectAccessConfig.getDefaultInstance();
+    }
+
+    @java.lang.Override
+    public com.google.cloud.managedkafka.v1.ConnectAccessConfig build() {
+      com.google.cloud.managedkafka.v1.ConnectAccessConfig result = buildPartial();
+      if (!result.isInitialized()) {
+        throw newUninitializedMessageException(result);
+      }
+      return result;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.managedkafka.v1.ConnectAccessConfig buildPartial() {
+      com.google.cloud.managedkafka.v1.ConnectAccessConfig result =
+          new com.google.cloud.managedkafka.v1.ConnectAccessConfig(this);
+      buildPartialRepeatedFields(result);
+      if (bitField0_ != 0) {
+        buildPartial0(result);
+      }
+      onBuilt();
+      return result;
+    }
+
+    private void buildPartialRepeatedFields(
+        com.google.cloud.managedkafka.v1.ConnectAccessConfig result) {
+      if (networkConfigsBuilder_ == null) {
+        if (((bitField0_ & 0x00000001) != 0)) {
+          networkConfigs_ = java.util.Collections.unmodifiableList(networkConfigs_);
+          bitField0_ = (bitField0_ & ~0x00000001);
+        }
+        result.networkConfigs_ = networkConfigs_;
+      } else {
+        result.networkConfigs_ = networkConfigsBuilder_.build();
+      }
+    }
+
+    private void buildPartial0(com.google.cloud.managedkafka.v1.ConnectAccessConfig result) {
+      int from_bitField0_ = bitField0_;
+    }
+
+    @java.lang.Override
+    public Builder clone() {
+      return super.clone();
+    }
+
+    @java.lang.Override
+    public Builder setField(
+        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+      return super.setField(field, value);
+    }
+
+    @java.lang.Override
+    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+      return super.clearField(field);
+    }
+
+    @java.lang.Override
+    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+      return super.clearOneof(oneof);
+    }
+
+    @java.lang.Override
+    public Builder setRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+      return super.setRepeatedField(field, index, value);
+    }
+
+    @java.lang.Override
+    public Builder addRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+      return super.addRepeatedField(field, value);
+    }
+
+    @java.lang.Override
+    public Builder mergeFrom(com.google.protobuf.Message other) {
+      if (other instanceof com.google.cloud.managedkafka.v1.ConnectAccessConfig) {
+        return mergeFrom((com.google.cloud.managedkafka.v1.ConnectAccessConfig) other);
+      } else {
+        super.mergeFrom(other);
+        return this;
+      }
+    }
+
+    public Builder mergeFrom(com.google.cloud.managedkafka.v1.ConnectAccessConfig other) {
+      if (other == com.google.cloud.managedkafka.v1.ConnectAccessConfig.getDefaultInstance())
+        return this;
+      if (networkConfigsBuilder_ == null) {
+        if (!other.networkConfigs_.isEmpty()) {
+          if (networkConfigs_.isEmpty()) {
+            networkConfigs_ = other.networkConfigs_;
+            bitField0_ = (bitField0_ & ~0x00000001);
+          } else {
+            ensureNetworkConfigsIsMutable();
+            networkConfigs_.addAll(other.networkConfigs_);
+          }
+          onChanged();
+        }
+      } else {
+        if (!other.networkConfigs_.isEmpty()) {
+          if (networkConfigsBuilder_.isEmpty()) {
+            networkConfigsBuilder_.dispose();
+            networkConfigsBuilder_ = null;
+            networkConfigs_ = other.networkConfigs_;
+            bitField0_ = (bitField0_ & ~0x00000001);
+            networkConfigsBuilder_ =
+                com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
+                    ? getNetworkConfigsFieldBuilder()
+                    : null;
+          } else {
+            networkConfigsBuilder_.addAllMessages(other.networkConfigs_);
+          }
+        }
+      }
+      this.mergeUnknownFields(other.getUnknownFields());
+      onChanged();
+      return this;
+    }
+
+    @java.lang.Override
+    public final boolean isInitialized() {
+      return true;
+    }
+
+    @java.lang.Override
+    public Builder mergeFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      if (extensionRegistry == null) {
+        throw new java.lang.NullPointerException();
+      }
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            case 10:
+              {
+                com.google.cloud.managedkafka.v1.ConnectNetworkConfig m =
+                    input.readMessage(
+                        com.google.cloud.managedkafka.v1.ConnectNetworkConfig.parser(),
+                        extensionRegistry);
+                if (networkConfigsBuilder_ == null) {
+                  ensureNetworkConfigsIsMutable();
+                  networkConfigs_.add(m);
+                } else {
+                  networkConfigsBuilder_.addMessage(m);
+                }
+                break;
+              } // case 10
+            default:
+              {
+                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
+                  done = true; // was an endgroup tag
+                }
+                break;
+              } // default:
+          } // switch (tag)
+        } // while (!done)
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.unwrapIOException();
+      } finally {
+        onChanged();
+      } // finally
+      return this;
+    }
+
+    private int bitField0_;
+
+    private java.util.List<com.google.cloud.managedkafka.v1.ConnectNetworkConfig>
+        networkConfigs_ = java.util.Collections.emptyList();
+
+    private void ensureNetworkConfigsIsMutable() {
+      if (!((bitField0_ & 0x00000001) != 0)) {
+        networkConfigs_ =
+            new java.util.ArrayList<com.google.cloud.managedkafka.v1.ConnectNetworkConfig>(
+                networkConfigs_);
+        bitField0_ |= 0x00000001;
+      }
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.managedkafka.v1.ConnectNetworkConfig,
+            com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder,
+            com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder>
+        networkConfigsBuilder_;
+
+    /**
+     *
+     *
+     * <pre>
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+     *
+     * <code>
+     * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED];
+     * </code>
+     */
+    public java.util.List<com.google.cloud.managedkafka.v1.ConnectNetworkConfig>
+        getNetworkConfigsList() {
+      if (networkConfigsBuilder_ == null) {
+        return java.util.Collections.unmodifiableList(networkConfigs_);
+      } else {
+        return networkConfigsBuilder_.getMessageList();
+      }
+    }
+    /**
+     *
+     *
+     * <pre>
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getNetworkConfigsCount() { + if (networkConfigsBuilder_ == null) { + return networkConfigs_.size(); + } else { + return networkConfigsBuilder_.getCount(); + } + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.ConnectNetworkConfig getNetworkConfigs(int index) { + if (networkConfigsBuilder_ == null) { + return networkConfigs_.get(index); + } else { + return networkConfigsBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setNetworkConfigs( + int index, com.google.cloud.managedkafka.v1.ConnectNetworkConfig value) { + if (networkConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNetworkConfigsIsMutable(); + networkConfigs_.set(index, value); + onChanged(); + } else { + networkConfigsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setNetworkConfigs( + int index, com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder builderForValue) { + if (networkConfigsBuilder_ == null) { + ensureNetworkConfigsIsMutable(); + networkConfigs_.set(index, builderForValue.build()); + onChanged(); + } else { + networkConfigsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addNetworkConfigs(com.google.cloud.managedkafka.v1.ConnectNetworkConfig value) { + if (networkConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNetworkConfigsIsMutable(); + networkConfigs_.add(value); + onChanged(); + } else { + networkConfigsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addNetworkConfigs( + int index, com.google.cloud.managedkafka.v1.ConnectNetworkConfig value) { + if (networkConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNetworkConfigsIsMutable(); + networkConfigs_.add(index, value); + onChanged(); + } else { + networkConfigsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addNetworkConfigs( + com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder builderForValue) { + if (networkConfigsBuilder_ == null) { + ensureNetworkConfigsIsMutable(); + networkConfigs_.add(builderForValue.build()); + onChanged(); + } else { + networkConfigsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addNetworkConfigs( + int index, com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder builderForValue) { + if (networkConfigsBuilder_ == null) { + ensureNetworkConfigsIsMutable(); + networkConfigs_.add(index, builderForValue.build()); + onChanged(); + } else { + networkConfigsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+     *
+     * <code>
+     * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED];
+     * </code>
+     */
+    public Builder addAllNetworkConfigs(
+        java.lang.Iterable<? extends com.google.cloud.managedkafka.v1.ConnectNetworkConfig>
+            values) {
+      if (networkConfigsBuilder_ == null) {
+        ensureNetworkConfigsIsMutable();
+        com.google.protobuf.AbstractMessageLite.Builder.addAll(values, networkConfigs_);
+        onChanged();
+      } else {
+        networkConfigsBuilder_.addAllMessages(values);
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     * <pre>
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearNetworkConfigs() { + if (networkConfigsBuilder_ == null) { + networkConfigs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + networkConfigsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeNetworkConfigs(int index) { + if (networkConfigsBuilder_ == null) { + ensureNetworkConfigsIsMutable(); + networkConfigs_.remove(index); + onChanged(); + } else { + networkConfigsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder getNetworkConfigsBuilder( + int index) { + return getNetworkConfigsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder + getNetworkConfigsOrBuilder(int index) { + if (networkConfigsBuilder_ == null) { + return networkConfigs_.get(index); + } else { + return networkConfigsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+     *
+     * <code>
+     * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED];
+     * </code>
+     */
+    public java.util.List<? extends com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder>
+        getNetworkConfigsOrBuilderList() {
+      if (networkConfigsBuilder_ != null) {
+        return networkConfigsBuilder_.getMessageOrBuilderList();
+      } else {
+        return java.util.Collections.unmodifiableList(networkConfigs_);
+      }
+    }
+    /**
+     *
+     *
+     * <pre>
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder + addNetworkConfigsBuilder() { + return getNetworkConfigsFieldBuilder() + .addBuilder(com.google.cloud.managedkafka.v1.ConnectNetworkConfig.getDefaultInstance()); + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder addNetworkConfigsBuilder( + int index) { + return getNetworkConfigsFieldBuilder() + .addBuilder( + index, com.google.cloud.managedkafka.v1.ConnectNetworkConfig.getDefaultInstance()); + } + /** + * + * + *
+     * Required.
+     * Virtual Private Cloud (VPC) networks that must be granted direct access to
+     * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+     * networks can be specified.
+     * 
+     *
+     * <code>
+     * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED];
+     * </code>
+     */
+    public java.util.List<com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder>
+        getNetworkConfigsBuilderList() {
+      return getNetworkConfigsFieldBuilder().getBuilderList();
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.managedkafka.v1.ConnectNetworkConfig,
+            com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder,
+            com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder>
+        getNetworkConfigsFieldBuilder() {
+      if (networkConfigsBuilder_ == null) {
+        networkConfigsBuilder_ =
+            new com.google.protobuf.RepeatedFieldBuilderV3<
+                com.google.cloud.managedkafka.v1.ConnectNetworkConfig,
+                com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder,
+                com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder>(
+                networkConfigs_,
+                ((bitField0_ & 0x00000001) != 0),
+                getParentForChildren(),
+                isClean());
+        networkConfigs_ = null;
+      }
+      return networkConfigsBuilder_;
+    }
+
+    @java.lang.Override
+    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFields(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ConnectAccessConfig)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ConnectAccessConfig)
+  private static final com.google.cloud.managedkafka.v1.ConnectAccessConfig DEFAULT_INSTANCE;
+
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ConnectAccessConfig();
+  }
+
+  public static com.google.cloud.managedkafka.v1.ConnectAccessConfig getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<ConnectAccessConfig> PARSER =
+      new com.google.protobuf.AbstractParser<ConnectAccessConfig>() {
+        @java.lang.Override
+        public ConnectAccessConfig parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          Builder builder = newBuilder();
+          try {
+            builder.mergeFrom(input, extensionRegistry);
+          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+            throw e.setUnfinishedMessage(builder.buildPartial());
+          } catch (com.google.protobuf.UninitializedMessageException e) {
+            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
+          } catch (java.io.IOException e) {
+            throw new com.google.protobuf.InvalidProtocolBufferException(e)
+                .setUnfinishedMessage(builder.buildPartial());
+          }
+          return builder.buildPartial();
+        }
+      };
+
+  public static com.google.protobuf.Parser<ConnectAccessConfig> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<ConnectAccessConfig> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.managedkafka.v1.ConnectAccessConfig getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+}
diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectAccessConfigOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectAccessConfigOrBuilder.java
new file mode 100644
index 000000000000..8c5bb22e33e5
--- /dev/null
+++
b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectAccessConfigOrBuilder.java @@ -0,0 +1,104 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface ConnectAccessConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ConnectAccessConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
+   * 
+   *
+   * <code>
+   * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED];
+   * </code>
+   */
+  java.util.List<com.google.cloud.managedkafka.v1.ConnectNetworkConfig> getNetworkConfigsList();
+  /**
+   *
+   *
+   * <pre>
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
+   * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.managedkafka.v1.ConnectNetworkConfig getNetworkConfigs(int index); + /** + * + * + *
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
+   * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getNetworkConfigsCount(); + /** + * + * + *
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
+   * 
+   *
+   * <code>
+   * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED];
+   * </code>
+   */
+  java.util.List<? extends com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder>
+      getNetworkConfigsOrBuilderList();
+  /**
+   *
+   *
+   * <pre>
+   * Required.
+   * Virtual Private Cloud (VPC) networks that must be granted direct access to
+   * the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+   * networks can be specified.
+   * 
+ * + * + * repeated .google.cloud.managedkafka.v1.ConnectNetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder getNetworkConfigsOrBuilder( + int index); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectCluster.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectCluster.java new file mode 100644 index 000000000000..bf1ce512bc1e --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectCluster.java @@ -0,0 +1,3141 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * An Apache Kafka Connect cluster deployed in a location.
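+ *
+ * Illustrative only (editor's sketch, not emitted by protoc): a fetched
+ * instance is typically checked for readiness via the generated state accessor:
+ *   boolean ready = connectCluster.getState() == ConnectCluster.State.ACTIVE;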
+ * 
+ *
+ * Protobuf type {@code google.cloud.managedkafka.v1.ConnectCluster}
+ */
+public final class ConnectCluster extends com.google.protobuf.GeneratedMessageV3
+    implements
+    // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ConnectCluster)
+    ConnectClusterOrBuilder {
+  private static final long serialVersionUID = 0L;
+  // Use ConnectCluster.newBuilder() to construct.
+  private ConnectCluster(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+
+  private ConnectCluster() {
+    name_ = "";
+    kafkaCluster_ = "";
+    state_ = 0;
+  }
+
+  @java.lang.Override
+  @SuppressWarnings({"unused"})
+  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+    return new ConnectCluster();
+  }
+
+  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+    return com.google.cloud.managedkafka.v1.ResourcesProto
+        .internal_static_google_cloud_managedkafka_v1_ConnectCluster_descriptor;
+  }
+
+  @SuppressWarnings({"rawtypes"})
+  @java.lang.Override
+  protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(
+      int number) {
+    switch (number) {
+      case 5:
+        return internalGetLabels();
+      case 9:
+        return internalGetConfig();
+      default:
+        throw new RuntimeException("Invalid map field number: " + number);
+    }
+  }
+
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.managedkafka.v1.ResourcesProto
+        .internal_static_google_cloud_managedkafka_v1_ConnectCluster_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.managedkafka.v1.ConnectCluster.class,
+            com.google.cloud.managedkafka.v1.ConnectCluster.Builder.class);
+  }
+
+  /**
+   *
+   *
+   * <pre>
+   * The state of the cluster.
+   * 
+ * + * Protobuf enum {@code google.cloud.managedkafka.v1.ConnectCluster.State} + */ + public enum State implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * A state was not specified.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + * + * + *
+     * The cluster is being created.
+     * 
+ * + * CREATING = 1; + */ + CREATING(1), + /** + * + * + *
+     * The cluster is active.
+     * 
+ * + * ACTIVE = 2; + */ + ACTIVE(2), + /** + * + * + *
+     * The cluster is being deleted.
+     * 
+ * + * DELETING = 3; + */ + DELETING(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * A state was not specified.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * The cluster is being created.
+     * 
+ * + * CREATING = 1; + */ + public static final int CREATING_VALUE = 1; + /** + * + * + *
+     * The cluster is active.
+     * 
+ * + * ACTIVE = 2; + */ + public static final int ACTIVE_VALUE = 2; + /** + * + * + *
+     * The cluster is being deleted.
+     * 
+     *
+     * <code>DELETING = 3;</code>
+     */
+    public static final int DELETING_VALUE = 3;
+
+    public final int getNumber() {
+      if (this == UNRECOGNIZED) {
+        throw new java.lang.IllegalArgumentException(
+            "Can't get the number of an unknown enum value.");
+      }
+      return value;
+    }
+
+    /**
+     * @param value The numeric wire value of the corresponding enum entry.
+     * @return The enum associated with the given numeric wire value.
+     * @deprecated Use {@link #forNumber(int)} instead.
+     */
+    @java.lang.Deprecated
+    public static State valueOf(int value) {
+      return forNumber(value);
+    }
+
+    /**
+     * @param value The numeric wire value of the corresponding enum entry.
+     * @return The enum associated with the given numeric wire value.
+     */
+    public static State forNumber(int value) {
+      switch (value) {
+        case 0:
+          return STATE_UNSPECIFIED;
+        case 1:
+          return CREATING;
+        case 2:
+          return ACTIVE;
+        case 3:
+          return DELETING;
+        default:
+          return null;
+      }
+    }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<State> internalGetValueMap() {
+      return internalValueMap;
+    }
+
+    private static final com.google.protobuf.Internal.EnumLiteMap<State> internalValueMap =
+        new com.google.protobuf.Internal.EnumLiteMap<State>() {
+          public State findValueByNumber(int number) {
+            return State.forNumber(number);
+          }
+        };
+
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
+      if (this == UNRECOGNIZED) {
+        throw new java.lang.IllegalStateException(
+            "Can't get the descriptor of an unrecognized enum value.");
+      }
+      return getDescriptor().getValues().get(ordinal());
+    }
+
+    public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
+      return getDescriptor();
+    }
+
+    public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
+      return com.google.cloud.managedkafka.v1.ConnectCluster.getDescriptor().getEnumTypes().get(0);
+    }
+
+    private static final State[] VALUES = values();
+
+    public static State valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
+      }
+      if (desc.getIndex() == -1) {
+        return UNRECOGNIZED;
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int value;
+
+    private State(int value) {
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:google.cloud.managedkafka.v1.ConnectCluster.State)
+  }
+
+  private int bitField0_;
+  private int platformConfigCase_ = 0;
+
+  @SuppressWarnings("serial")
+  private java.lang.Object platformConfig_;
+
+  public enum PlatformConfigCase
+      implements
+          com.google.protobuf.Internal.EnumLite,
+          com.google.protobuf.AbstractMessage.InternalOneOfEnum {
+    GCP_CONFIG(7),
+    PLATFORMCONFIG_NOT_SET(0);
+    private final int value;
+
+    private PlatformConfigCase(int value) {
+      this.value = value;
+    }
+    /**
+     * @param value The number of the enum to look for.
+     * @return The enum associated with the given number.
+     * @deprecated Use {@link #forNumber(int)} instead.
+ */ + @java.lang.Deprecated + public static PlatformConfigCase valueOf(int value) { + return forNumber(value); + } + + public static PlatformConfigCase forNumber(int value) { + switch (value) { + case 7: + return GCP_CONFIG; + case 0: + return PLATFORMCONFIG_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public PlatformConfigCase getPlatformConfigCase() { + return PlatformConfigCase.forNumber(platformConfigCase_); + } + + public static final int GCP_CONFIG_FIELD_NUMBER = 7; + /** + * + * + *
+   * Required. Configuration properties for a Kafka Connect cluster deployed
+   * to Google Cloud Platform.
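+   *
+   * Illustrative only (editor's sketch, not emitted by protoc): because
+   * gcp_config lives in the platform_config oneof, guard reads with the
+   * has-method:
+   *   if (cluster.hasGcpConfig()) {
+   *     ConnectGcpConfig gcp = cluster.getGcpConfig();
+   *   }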
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the gcpConfig field is set. + */ + @java.lang.Override + public boolean hasGcpConfig() { + return platformConfigCase_ == 7; + } + /** + * + * + *
+   * Required. Configuration properties for a Kafka Connect cluster deployed
+   * to Google Cloud Platform.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The gcpConfig. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectGcpConfig getGcpConfig() { + if (platformConfigCase_ == 7) { + return (com.google.cloud.managedkafka.v1.ConnectGcpConfig) platformConfig_; + } + return com.google.cloud.managedkafka.v1.ConnectGcpConfig.getDefaultInstance(); + } + /** + * + * + *
+   * Required. Configuration properties for a Kafka Connect cluster deployed
+   * to Google Cloud Platform.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectGcpConfigOrBuilder getGcpConfigOrBuilder() { + if (platformConfigCase_ == 7) { + return (com.google.cloud.managedkafka.v1.ConnectGcpConfig) platformConfig_; + } + return com.google.cloud.managedkafka.v1.ConnectGcpConfig.getDefaultInstance(); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
+   * Identifier. The name of the Kafka Connect cluster. Structured like:
+   * projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id}
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Identifier. The name of the Kafka Connect cluster. Structured like:
+   * projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id}
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KAFKA_CLUSTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object kafkaCluster_ = ""; + /** + * + * + *
+   * Required. Immutable. The name of the Kafka cluster this Kafka Connect
+   * cluster is attached to. Structured like:
+   * projects/{project}/locations/{location}/clusters/{cluster}
+   * 
+ * + * + * string kafka_cluster = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The kafkaCluster. + */ + @java.lang.Override + public java.lang.String getKafkaCluster() { + java.lang.Object ref = kafkaCluster_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kafkaCluster_ = s; + return s; + } + } + /** + * + * + *
+   * Required. Immutable. The name of the Kafka cluster this Kafka Connect
+   * cluster is attached to. Structured like:
+   * projects/{project}/locations/{location}/clusters/{cluster}
+   * 
+ * + * + * string kafka_cluster = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The bytes for kafkaCluster. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKafkaClusterBytes() { + java.lang.Object ref = kafkaCluster_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kafkaCluster_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp createTime_; + /** + * + * + *
+   * Output only. The time when the cluster was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
+   * Output only. The time when the cluster was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + /** + * + * + *
+   * Output only. The time when the cluster was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp updateTime_; + /** + * + * + *
+   * Output only. The time when the cluster was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
+   * Output only. The time when the cluster was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + /** + * + * + *
+   * Output only. The time when the cluster was last updated.
+   * 
+   *
+   * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY];
+   *
+   */
+  @java.lang.Override
+  public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() {
+    return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_;
+  }
+
+  public static final int LABELS_FIELD_NUMBER = 5;
+
+  private static final class LabelsDefaultEntryHolder {
+    static final com.google.protobuf.MapEntry<java.lang.String, java.lang.String> defaultEntry =
+        com.google.protobuf.MapEntry.<java.lang.String, java.lang.String>newDefaultInstance(
+            com.google.cloud.managedkafka.v1.ResourcesProto
+                .internal_static_google_cloud_managedkafka_v1_ConnectCluster_LabelsEntry_descriptor,
+            com.google.protobuf.WireFormat.FieldType.STRING,
+            "",
+            com.google.protobuf.WireFormat.FieldType.STRING,
+            "");
+  }
+
+  @SuppressWarnings("serial")
+  private com.google.protobuf.MapField<java.lang.String, java.lang.String> labels_;
+
+  private com.google.protobuf.MapField<java.lang.String, java.lang.String> internalGetLabels() {
+    if (labels_ == null) {
+      return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry);
+    }
+    return labels_;
+  }
+
+  public int getLabelsCount() {
+    return internalGetLabels().getMap().size();
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Labels as key value pairs.
+   * 
+   *
+   * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  @java.lang.Override
+  public boolean containsLabels(java.lang.String key) {
+    if (key == null) {
+      throw new NullPointerException("map key");
+    }
+    return internalGetLabels().getMap().containsKey(key);
+  }
+  /** Use {@link #getLabelsMap()} instead. */
+  @java.lang.Override
+  @java.lang.Deprecated
+  public java.util.Map<java.lang.String, java.lang.String> getLabels() {
+    return getLabelsMap();
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Labels as key value pairs.
+   * 
+   *
+   * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  @java.lang.Override
+  public java.util.Map<java.lang.String, java.lang.String> getLabelsMap() {
+    return internalGetLabels().getMap();
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Labels as key value pairs.
+   * 
+   *
+   * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  @java.lang.Override
+  public /* nullable */ java.lang.String getLabelsOrDefault(
+      java.lang.String key,
+      /* nullable */
+      java.lang.String defaultValue) {
+    if (key == null) {
+      throw new NullPointerException("map key");
+    }
+    java.util.Map<java.lang.String, java.lang.String> map = internalGetLabels().getMap();
+    return map.containsKey(key) ? map.get(key) : defaultValue;
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Labels as key value pairs.
+   * 
+   *
+   * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  @java.lang.Override
+  public java.lang.String getLabelsOrThrow(java.lang.String key) {
+    if (key == null) {
+      throw new NullPointerException("map key");
+    }
+    java.util.Map<java.lang.String, java.lang.String> map = internalGetLabels().getMap();
+    if (!map.containsKey(key)) {
+      throw new java.lang.IllegalArgumentException();
+    }
+    return map.get(key);
+  }
+
+  public static final int CAPACITY_CONFIG_FIELD_NUMBER = 6;
+  private com.google.cloud.managedkafka.v1.CapacityConfig capacityConfig_;
+  /**
+   *
+   *
+   *
+   * Required. Capacity configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the capacityConfig field is set. + */ + @java.lang.Override + public boolean hasCapacityConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
+   * Required. Capacity configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The capacityConfig. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.CapacityConfig getCapacityConfig() { + return capacityConfig_ == null + ? com.google.cloud.managedkafka.v1.CapacityConfig.getDefaultInstance() + : capacityConfig_; + } + /** + * + * + *
+   * Required. Capacity configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.CapacityConfigOrBuilder getCapacityConfigOrBuilder() { + return capacityConfig_ == null + ? com.google.cloud.managedkafka.v1.CapacityConfig.getDefaultInstance() + : capacityConfig_; + } + + public static final int STATE_FIELD_NUMBER = 8; + private int state_ = 0; + /** + * + * + *
+   * Output only. The current state of the cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster.State state = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + /** + * + * + *
+   * Output only. The current state of the cluster.
+   * 
+   *
+   *
+   * .google.cloud.managedkafka.v1.ConnectCluster.State state = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
+   *
+   *
+   * @return The state.
+   */
+  @java.lang.Override
+  public com.google.cloud.managedkafka.v1.ConnectCluster.State getState() {
+    com.google.cloud.managedkafka.v1.ConnectCluster.State result =
+        com.google.cloud.managedkafka.v1.ConnectCluster.State.forNumber(state_);
+    return result == null
+        ? com.google.cloud.managedkafka.v1.ConnectCluster.State.UNRECOGNIZED
+        : result;
+  }
+
+  public static final int CONFIG_FIELD_NUMBER = 9;
+
+  private static final class ConfigDefaultEntryHolder {
+    static final com.google.protobuf.MapEntry<java.lang.String, java.lang.String> defaultEntry =
+        com.google.protobuf.MapEntry.<java.lang.String, java.lang.String>newDefaultInstance(
+            com.google.cloud.managedkafka.v1.ResourcesProto
+                .internal_static_google_cloud_managedkafka_v1_ConnectCluster_ConfigEntry_descriptor,
+            com.google.protobuf.WireFormat.FieldType.STRING,
+            "",
+            com.google.protobuf.WireFormat.FieldType.STRING,
+            "");
+  }
+
+  @SuppressWarnings("serial")
+  private com.google.protobuf.MapField<java.lang.String, java.lang.String> config_;
+
+  private com.google.protobuf.MapField<java.lang.String, java.lang.String> internalGetConfig() {
+    if (config_ == null) {
+      return com.google.protobuf.MapField.emptyMapField(ConfigDefaultEntryHolder.defaultEntry);
+    }
+    return config_;
+  }
+
+  public int getConfigCount() {
+    return internalGetConfig().getMap().size();
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Configurations for the worker that are overridden from the
+   * defaults. The key of the map is a Kafka Connect worker property name, for
+   * example: `exactly.once.source.support`.
+   * 
+   *
+   * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  @java.lang.Override
+  public boolean containsConfig(java.lang.String key) {
+    if (key == null) {
+      throw new NullPointerException("map key");
+    }
+    return internalGetConfig().getMap().containsKey(key);
+  }
+  /** Use {@link #getConfigMap()} instead. */
+  @java.lang.Override
+  @java.lang.Deprecated
+  public java.util.Map<java.lang.String, java.lang.String> getConfig() {
+    return getConfigMap();
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Configurations for the worker that are overridden from the
+   * defaults. The key of the map is a Kafka Connect worker property name, for
+   * example: `exactly.once.source.support`.
+   * 
+   *
+   * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  @java.lang.Override
+  public java.util.Map<java.lang.String, java.lang.String> getConfigMap() {
+    return internalGetConfig().getMap();
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Configurations for the worker that are overridden from the
+   * defaults. The key of the map is a Kafka Connect worker property name, for
+   * example: `exactly.once.source.support`.
+   * 
+   *
+   * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  @java.lang.Override
+  public /* nullable */ java.lang.String getConfigOrDefault(
+      java.lang.String key,
+      /* nullable */
+      java.lang.String defaultValue) {
+    if (key == null) {
+      throw new NullPointerException("map key");
+    }
+    java.util.Map<java.lang.String, java.lang.String> map = internalGetConfig().getMap();
+    return map.containsKey(key) ? map.get(key) : defaultValue;
+  }
+  /**
+   *
+   *
+   *
+   * Optional. Configurations for the worker that are overridden from the
+   * defaults. The key of the map is a Kafka Connect worker property name, for
+   * example: `exactly.once.source.support`.
+   * 
+   *
+   * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  @java.lang.Override
+  public java.lang.String getConfigOrThrow(java.lang.String key) {
+    if (key == null) {
+      throw new NullPointerException("map key");
+    }
+    java.util.Map<java.lang.String, java.lang.String> map = internalGetConfig().getMap();
+    if (!map.containsKey(key)) {
+      throw new java.lang.IllegalArgumentException();
+    }
+    return map.get(key);
+  }
+
+  private byte memoizedIsInitialized = -1;
+
+  @java.lang.Override
+  public final boolean isInitialized() {
+    byte isInitialized = memoizedIsInitialized;
+    if (isInitialized == 1) return true;
+    if (isInitialized == 0) return false;
+
+    memoizedIsInitialized = 1;
+    return true;
+  }
+
+  @java.lang.Override
+  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_);
+    }
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kafkaCluster_)) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 2, kafkaCluster_);
+    }
+    if (((bitField0_ & 0x00000001) != 0)) {
+      output.writeMessage(3, getCreateTime());
+    }
+    if (((bitField0_ & 0x00000002) != 0)) {
+      output.writeMessage(4, getUpdateTime());
+    }
+    com.google.protobuf.GeneratedMessageV3.serializeStringMapTo(
+        output, internalGetLabels(), LabelsDefaultEntryHolder.defaultEntry, 5);
+    if (((bitField0_ & 0x00000004) != 0)) {
+      output.writeMessage(6, getCapacityConfig());
+    }
+    if (platformConfigCase_ == 7) {
+      output.writeMessage(7, (com.google.cloud.managedkafka.v1.ConnectGcpConfig) platformConfig_);
+    }
+    if (state_
+        != com.google.cloud.managedkafka.v1.ConnectCluster.State.STATE_UNSPECIFIED.getNumber()) {
+      output.writeEnum(8, state_);
+    }
+    com.google.protobuf.GeneratedMessageV3.serializeStringMapTo(
+        output, internalGetConfig(), ConfigDefaultEntryHolder.defaultEntry, 9);
+    getUnknownFields().writeTo(output);
+  }
+
+  @java.lang.Override
+  public int getSerializedSize() {
+    int size = memoizedSize;
+    if (size != -1) return size;
+
+    size = 0;
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_);
+    }
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kafkaCluster_)) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, kafkaCluster_);
+    }
+    if (((bitField0_ & 0x00000001) != 0)) {
+      size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreateTime());
+    }
+    if (((bitField0_ & 0x00000002) != 0)) {
+      size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getUpdateTime());
+    }
+    for (java.util.Map.Entry<java.lang.String, java.lang.String> entry :
+        internalGetLabels().getMap().entrySet()) {
+      com.google.protobuf.MapEntry<java.lang.String, java.lang.String> labels__ =
+          LabelsDefaultEntryHolder.defaultEntry
+              .newBuilderForType()
+              .setKey(entry.getKey())
+              .setValue(entry.getValue())
+              .build();
+      size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, labels__);
+    }
+    if (((bitField0_ & 0x00000004) != 0)) {
+      size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getCapacityConfig());
+    }
+    if (platformConfigCase_ == 7) {
+      size +=
+          com.google.protobuf.CodedOutputStream.computeMessageSize(
+              7, (com.google.cloud.managedkafka.v1.ConnectGcpConfig) platformConfig_);
+    }
+    if (state_
+        != com.google.cloud.managedkafka.v1.ConnectCluster.State.STATE_UNSPECIFIED.getNumber()) {
+      size += com.google.protobuf.CodedOutputStream.computeEnumSize(8, state_);
+    }
+    for (java.util.Map.Entry<java.lang.String, java.lang.String> entry :
+        internalGetConfig().getMap().entrySet()) {
+      com.google.protobuf.MapEntry<java.lang.String, java.lang.String> config__ =
+          ConfigDefaultEntryHolder.defaultEntry
+              .newBuilderForType()
+              .setKey(entry.getKey())
+              .setValue(entry.getValue())
+              .build();
+      size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, config__);
+    }
+    size += getUnknownFields().getSerializedSize();
+    memoizedSize = size;
+    return size;
+  }
+
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (!(obj instanceof com.google.cloud.managedkafka.v1.ConnectCluster)) {
+      return super.equals(obj);
+    }
+    com.google.cloud.managedkafka.v1.ConnectCluster other =
+        (com.google.cloud.managedkafka.v1.ConnectCluster) obj;
+
+    if (!getName().equals(other.getName())) return false;
+    if (!getKafkaCluster().equals(other.getKafkaCluster())) return false;
+    if (hasCreateTime() != other.hasCreateTime()) return false;
+    if (hasCreateTime()) {
+      if (!getCreateTime().equals(other.getCreateTime())) return false;
+    }
+    if (hasUpdateTime() != other.hasUpdateTime()) return false;
+    if (hasUpdateTime()) {
+      if (!getUpdateTime().equals(other.getUpdateTime())) return false;
+    }
+    if (!internalGetLabels().equals(other.internalGetLabels())) return false;
+    if (hasCapacityConfig() != other.hasCapacityConfig()) return false;
+    if (hasCapacityConfig()) {
+      if (!getCapacityConfig().equals(other.getCapacityConfig())) return false;
+    }
+    if (state_ != other.state_) return false;
+    if (!internalGetConfig().equals(other.internalGetConfig())) return false;
+    if (!getPlatformConfigCase().equals(other.getPlatformConfigCase())) return false;
+    switch (platformConfigCase_) {
+      case 7:
+        if (!getGcpConfig().equals(other.getGcpConfig())) return false;
+        break;
+      case 0:
+      default:
+    }
+    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
+    return true;
+  }
+
+  @java.lang.Override
+  public int hashCode() {
+    if (memoizedHashCode != 0) {
+      return memoizedHashCode;
+    }
+    int hash = 41;
+    hash = (19 * hash) + getDescriptor().hashCode();
+    hash = (37 * hash) + NAME_FIELD_NUMBER;
+    hash = (53 * hash) + getName().hashCode();
+    hash = (37 * hash) + KAFKA_CLUSTER_FIELD_NUMBER;
+    hash = (53 * hash) + getKafkaCluster().hashCode();
+    if (hasCreateTime()) {
+      hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER;
+      hash = (53 * hash) + getCreateTime().hashCode();
+    }
+    if (hasUpdateTime()) {
+      hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER;
+      hash = (53 * hash) + getUpdateTime().hashCode();
+    }
+    if (!internalGetLabels().getMap().isEmpty()) {
+      hash = (37 * hash) + LABELS_FIELD_NUMBER;
+      hash = (53 * hash) + internalGetLabels().hashCode();
+    }
+    if (hasCapacityConfig()) {
+      hash = (37 * hash) + CAPACITY_CONFIG_FIELD_NUMBER;
+      hash = (53 * hash) + getCapacityConfig().hashCode();
+    }
+    hash = (37 * hash) + STATE_FIELD_NUMBER;
+    hash = (53 * hash) + state_;
+    if (!internalGetConfig().getMap().isEmpty()) {
+      hash = (37 * hash) + CONFIG_FIELD_NUMBER;
+      hash = (53 * hash) + internalGetConfig().hashCode();
+    }
+    switch (platformConfigCase_) {
+      case 7:
+        hash = (37 * hash) + GCP_CONFIG_FIELD_NUMBER;
+        hash = (53 * hash) + getGcpConfig().hashCode();
+        break;
+      case 0:
+      default:
+    }
+    hash = (29 * hash) + getUnknownFields().hashCode();
+    memoizedHashCode = hash;
+    return hash;
+  }
+
+  public static com.google.cloud.managedkafka.v1.ConnectCluster parseFrom(java.nio.ByteBuffer data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+
+  public static
com.google.cloud.managedkafka.v1.ConnectCluster parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.managedkafka.v1.ConnectCluster prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * An Apache Kafka Connect cluster deployed in a location.
+   * 
+   *
+   * Protobuf type {@code google.cloud.managedkafka.v1.ConnectCluster}
+   */
+  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
+      implements
+      // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ConnectCluster)
+      com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder {
+    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+      return com.google.cloud.managedkafka.v1.ResourcesProto
+          .internal_static_google_cloud_managedkafka_v1_ConnectCluster_descriptor;
+    }
+
+    @SuppressWarnings({"rawtypes"})
+    protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(
+        int number) {
+      switch (number) {
+        case 5:
+          return internalGetLabels();
+        case 9:
+          return internalGetConfig();
+        default:
+          throw new RuntimeException("Invalid map field number: " + number);
+      }
+    }
+
+    @SuppressWarnings({"rawtypes"})
+    protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection(
+        int number) {
+      switch (number) {
+        case 5:
+          return internalGetMutableLabels();
+        case 9:
+          return internalGetMutableConfig();
+        default:
+          throw new RuntimeException("Invalid map field number: " + number);
+      }
+    }
+
+    @java.lang.Override
+    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return com.google.cloud.managedkafka.v1.ResourcesProto
+          .internal_static_google_cloud_managedkafka_v1_ConnectCluster_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              com.google.cloud.managedkafka.v1.ConnectCluster.class,
+              com.google.cloud.managedkafka.v1.ConnectCluster.Builder.class);
+    }
+
+    // Construct using com.google.cloud.managedkafka.v1.ConnectCluster.newBuilder()
+    private Builder() {
+      maybeForceBuilderInitialization();
+    }
+
+    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      super(parent);
+      maybeForceBuilderInitialization();
+    }
+
+    private void maybeForceBuilderInitialization() {
+      if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
+        getCreateTimeFieldBuilder();
+        getUpdateTimeFieldBuilder();
+        getCapacityConfigFieldBuilder();
+      }
+    }
+
+    @java.lang.Override
+    public Builder clear() {
+      super.clear();
+      bitField0_ = 0;
+      if (gcpConfigBuilder_ != null) {
+        gcpConfigBuilder_.clear();
+      }
+      name_ = "";
+      kafkaCluster_ = "";
+      createTime_ = null;
+      if (createTimeBuilder_ != null) {
+        createTimeBuilder_.dispose();
+        createTimeBuilder_ = null;
+      }
+      updateTime_ = null;
+      if (updateTimeBuilder_ != null) {
+        updateTimeBuilder_.dispose();
+        updateTimeBuilder_ = null;
+      }
+      internalGetMutableLabels().clear();
+      capacityConfig_ = null;
+      if (capacityConfigBuilder_ != null) {
+        capacityConfigBuilder_.dispose();
+        capacityConfigBuilder_ = null;
+      }
+      state_ = 0;
+      internalGetMutableConfig().clear();
+      platformConfigCase_ = 0;
+      platformConfig_ = null;
+      return this;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+      return com.google.cloud.managedkafka.v1.ResourcesProto
+          .internal_static_google_cloud_managedkafka_v1_ConnectCluster_descriptor;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.managedkafka.v1.ConnectCluster getDefaultInstanceForType() {
+      return com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance();
+    }
+
+    @java.lang.Override
+    public com.google.cloud.managedkafka.v1.ConnectCluster build() {
+      com.google.cloud.managedkafka.v1.ConnectCluster result = buildPartial();
+      if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectCluster buildPartial() { + com.google.cloud.managedkafka.v1.ConnectCluster result = + new com.google.cloud.managedkafka.v1.ConnectCluster(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.ConnectCluster result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.kafkaCluster_ = kafkaCluster_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.capacityConfig_ = + capacityConfigBuilder_ == null ? capacityConfig_ : capacityConfigBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.state_ = state_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.config_ = internalGetConfig(); + result.config_.makeImmutable(); + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.cloud.managedkafka.v1.ConnectCluster result) { + result.platformConfigCase_ = platformConfigCase_; + result.platformConfig_ = this.platformConfig_; + if (platformConfigCase_ == 7 && gcpConfigBuilder_ != null) { + result.platformConfig_ = gcpConfigBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.ConnectCluster) { + return mergeFrom((com.google.cloud.managedkafka.v1.ConnectCluster) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.ConnectCluster other) { + if (other == com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getKafkaCluster().isEmpty()) { + 
kafkaCluster_ = other.kafkaCluster_;
+        bitField0_ |= 0x00000004;
+        onChanged();
+      }
+      if (other.hasCreateTime()) {
+        mergeCreateTime(other.getCreateTime());
+      }
+      if (other.hasUpdateTime()) {
+        mergeUpdateTime(other.getUpdateTime());
+      }
+      internalGetMutableLabels().mergeFrom(other.internalGetLabels());
+      bitField0_ |= 0x00000020;
+      if (other.hasCapacityConfig()) {
+        mergeCapacityConfig(other.getCapacityConfig());
+      }
+      if (other.state_ != 0) {
+        setStateValue(other.getStateValue());
+      }
+      internalGetMutableConfig().mergeFrom(other.internalGetConfig());
+      bitField0_ |= 0x00000100;
+      switch (other.getPlatformConfigCase()) {
+        case GCP_CONFIG:
+          {
+            mergeGcpConfig(other.getGcpConfig());
+            break;
+          }
+        case PLATFORMCONFIG_NOT_SET:
+          {
+            break;
+          }
+      }
+      this.mergeUnknownFields(other.getUnknownFields());
+      onChanged();
+      return this;
+    }
+
+    @java.lang.Override
+    public final boolean isInitialized() {
+      return true;
+    }
+
+    @java.lang.Override
+    public Builder mergeFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      if (extensionRegistry == null) {
+        throw new java.lang.NullPointerException();
+      }
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            case 10:
+              {
+                name_ = input.readStringRequireUtf8();
+                bitField0_ |= 0x00000002;
+                break;
+              } // case 10
+            case 18:
+              {
+                kafkaCluster_ = input.readStringRequireUtf8();
+                bitField0_ |= 0x00000004;
+                break;
+              } // case 18
+            case 26:
+              {
+                input.readMessage(getCreateTimeFieldBuilder().getBuilder(), extensionRegistry);
+                bitField0_ |= 0x00000008;
+                break;
+              } // case 26
+            case 34:
+              {
+                input.readMessage(getUpdateTimeFieldBuilder().getBuilder(), extensionRegistry);
+                bitField0_ |= 0x00000010;
+                break;
+              } // case 34
+            case 42:
+              {
+                com.google.protobuf.MapEntry<java.lang.String, java.lang.String> labels__ =
+                    input.readMessage(
+                        LabelsDefaultEntryHolder.defaultEntry.getParserForType(),
+                        extensionRegistry);
+                internalGetMutableLabels()
+                    .getMutableMap()
+                    .put(labels__.getKey(), labels__.getValue());
+                bitField0_ |= 0x00000020;
+                break;
+              } // case 42
+            case 50:
+              {
+                input.readMessage(getCapacityConfigFieldBuilder().getBuilder(), extensionRegistry);
+                bitField0_ |= 0x00000040;
+                break;
+              } // case 50
+            case 58:
+              {
+                input.readMessage(getGcpConfigFieldBuilder().getBuilder(), extensionRegistry);
+                platformConfigCase_ = 7;
+                break;
+              } // case 58
+            case 64:
+              {
+                state_ = input.readEnum();
+                bitField0_ |= 0x00000080;
+                break;
+              } // case 64
+            case 74:
+              {
+                com.google.protobuf.MapEntry<java.lang.String, java.lang.String> config__ =
+                    input.readMessage(
+                        ConfigDefaultEntryHolder.defaultEntry.getParserForType(),
+                        extensionRegistry);
+                internalGetMutableConfig()
+                    .getMutableMap()
+                    .put(config__.getKey(), config__.getValue());
+                bitField0_ |= 0x00000100;
+                break;
+              } // case 74
+            default:
+              {
+                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
+                  done = true; // was an endgroup tag
+                }
+                break;
+              } // default:
+          } // switch (tag)
+        } // while (!done)
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.unwrapIOException();
+      } finally {
+        onChanged();
+      } // finally
+      return this;
+    }
+
+    private int platformConfigCase_ = 0;
+    private java.lang.Object platformConfig_;
+
+    public PlatformConfigCase getPlatformConfigCase() {
+      return PlatformConfigCase.forNumber(platformConfigCase_);
+    }
+
+    public Builder clearPlatformConfig() {
+      platformConfigCase_ = 0;
+      platformConfig_ = null;
+      onChanged();
+      return this;
+    }
+ + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectGcpConfig, + com.google.cloud.managedkafka.v1.ConnectGcpConfig.Builder, + com.google.cloud.managedkafka.v1.ConnectGcpConfigOrBuilder> + gcpConfigBuilder_; + /** + * + * + *
+     * Required. Configuration properties for a Kafka Connect cluster deployed
+     * to Google Cloud Platform.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the gcpConfig field is set. + */ + @java.lang.Override + public boolean hasGcpConfig() { + return platformConfigCase_ == 7; + } + /** + * + * + *
+     * Required. Configuration properties for a Kafka Connect cluster deployed
+     * to Google Cloud Platform.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The gcpConfig. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectGcpConfig getGcpConfig() { + if (gcpConfigBuilder_ == null) { + if (platformConfigCase_ == 7) { + return (com.google.cloud.managedkafka.v1.ConnectGcpConfig) platformConfig_; + } + return com.google.cloud.managedkafka.v1.ConnectGcpConfig.getDefaultInstance(); + } else { + if (platformConfigCase_ == 7) { + return gcpConfigBuilder_.getMessage(); + } + return com.google.cloud.managedkafka.v1.ConnectGcpConfig.getDefaultInstance(); + } + } + /** + * + * + *
+     * Required. Configuration properties for a Kafka Connect cluster deployed
+     * to Google Cloud Platform.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setGcpConfig(com.google.cloud.managedkafka.v1.ConnectGcpConfig value) { + if (gcpConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + platformConfig_ = value; + onChanged(); + } else { + gcpConfigBuilder_.setMessage(value); + } + platformConfigCase_ = 7; + return this; + } + /** + * + * + *
+     * Required. Configuration properties for a Kafka Connect cluster deployed
+     * to Google Cloud Platform.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setGcpConfig( + com.google.cloud.managedkafka.v1.ConnectGcpConfig.Builder builderForValue) { + if (gcpConfigBuilder_ == null) { + platformConfig_ = builderForValue.build(); + onChanged(); + } else { + gcpConfigBuilder_.setMessage(builderForValue.build()); + } + platformConfigCase_ = 7; + return this; + } + /** + * + * + *
+     * Required. Configuration properties for a Kafka Connect cluster deployed
+     * to Google Cloud Platform.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeGcpConfig(com.google.cloud.managedkafka.v1.ConnectGcpConfig value) { + if (gcpConfigBuilder_ == null) { + if (platformConfigCase_ == 7 + && platformConfig_ + != com.google.cloud.managedkafka.v1.ConnectGcpConfig.getDefaultInstance()) { + platformConfig_ = + com.google.cloud.managedkafka.v1.ConnectGcpConfig.newBuilder( + (com.google.cloud.managedkafka.v1.ConnectGcpConfig) platformConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + platformConfig_ = value; + } + onChanged(); + } else { + if (platformConfigCase_ == 7) { + gcpConfigBuilder_.mergeFrom(value); + } else { + gcpConfigBuilder_.setMessage(value); + } + } + platformConfigCase_ = 7; + return this; + } + /** + * + * + *
+     * Required. Configuration properties for a Kafka Connect cluster deployed
+     * to Google Cloud Platform.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearGcpConfig() { + if (gcpConfigBuilder_ == null) { + if (platformConfigCase_ == 7) { + platformConfigCase_ = 0; + platformConfig_ = null; + onChanged(); + } + } else { + if (platformConfigCase_ == 7) { + platformConfigCase_ = 0; + platformConfig_ = null; + } + gcpConfigBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Required. Configuration properties for a Kafka Connect cluster deployed
+     * to Google Cloud Platform.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.ConnectGcpConfig.Builder getGcpConfigBuilder() { + return getGcpConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Required. Configuration properties for a Kafka Connect cluster deployed
+     * to Google Cloud Platform.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectGcpConfigOrBuilder getGcpConfigOrBuilder() { + if ((platformConfigCase_ == 7) && (gcpConfigBuilder_ != null)) { + return gcpConfigBuilder_.getMessageOrBuilder(); + } else { + if (platformConfigCase_ == 7) { + return (com.google.cloud.managedkafka.v1.ConnectGcpConfig) platformConfig_; + } + return com.google.cloud.managedkafka.v1.ConnectGcpConfig.getDefaultInstance(); + } + } + /** + * + * + *
+     * Required. Configuration properties for a Kafka Connect cluster deployed
+     * to Google Cloud Platform.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectGcpConfig, + com.google.cloud.managedkafka.v1.ConnectGcpConfig.Builder, + com.google.cloud.managedkafka.v1.ConnectGcpConfigOrBuilder> + getGcpConfigFieldBuilder() { + if (gcpConfigBuilder_ == null) { + if (!(platformConfigCase_ == 7)) { + platformConfig_ = com.google.cloud.managedkafka.v1.ConnectGcpConfig.getDefaultInstance(); + } + gcpConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectGcpConfig, + com.google.cloud.managedkafka.v1.ConnectGcpConfig.Builder, + com.google.cloud.managedkafka.v1.ConnectGcpConfigOrBuilder>( + (com.google.cloud.managedkafka.v1.ConnectGcpConfig) platformConfig_, + getParentForChildren(), + isClean()); + platformConfig_ = null; + } + platformConfigCase_ = 7; + onChanged(); + return gcpConfigBuilder_; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Identifier. The name of the Kafka Connect cluster. Structured like:
+     * projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Identifier. The name of the Kafka Connect cluster. Structured like:
+     * projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Identifier. The name of the Kafka Connect cluster. Structured like:
+     * projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Identifier. The name of the Kafka Connect cluster. Structured like:
+     * projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * Identifier. The name of the Kafka Connect cluster. Structured like:
+     * projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object kafkaCluster_ = ""; + /** + * + * + *
+     * Required. Immutable. The name of the Kafka cluster this Kafka Connect
+     * cluster is attached to. Structured like:
+     * projects/{project}/locations/{location}/clusters/{cluster}
+     * 
+ * + * + * string kafka_cluster = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The kafkaCluster. + */ + public java.lang.String getKafkaCluster() { + java.lang.Object ref = kafkaCluster_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kafkaCluster_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. Immutable. The name of the Kafka cluster this Kafka Connect
+     * cluster is attached to. Structured like:
+     * projects/{project}/locations/{location}/clusters/{cluster}
+     * 
+ * + * + * string kafka_cluster = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The bytes for kafkaCluster. + */ + public com.google.protobuf.ByteString getKafkaClusterBytes() { + java.lang.Object ref = kafkaCluster_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kafkaCluster_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. Immutable. The name of the Kafka cluster this Kafka Connect
+     * cluster is attached to. Structured like:
+     * projects/{project}/locations/{location}/clusters/{cluster}
+     * 
+ * + * + * string kafka_cluster = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The kafkaCluster to set. + * @return This builder for chaining. + */ + public Builder setKafkaCluster(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + kafkaCluster_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Immutable. The name of the Kafka cluster this Kafka Connect
+     * cluster is attached to. Structured like:
+     * projects/{project}/locations/{location}/clusters/{cluster}
+     * 
+ * + * + * string kafka_cluster = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearKafkaCluster() { + kafkaCluster_ = getDefaultInstance().getKafkaCluster(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Immutable. The name of the Kafka cluster this Kafka Connect
+     * cluster is attached to. Structured like:
+     * projects/{project}/locations/{location}/clusters/{cluster}
+     * 
+ * + * + * string kafka_cluster = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The bytes for kafkaCluster to set. + * @return This builder for chaining. + */ + public Builder setKafkaClusterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + kafkaCluster_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + /** + * + * + *
+     * Output only. The time when the cluster was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * + * + *
+     * Output only. The time when the cluster was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. The time when the cluster was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000008); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getCreateTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. The time when the cluster was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + /** + * + * + *
+     * Output only. The time when the cluster was created.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + /** + * + * + *
+     * Output only. The time when the cluster was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000010) != 0); + } + /** + * + * + *
+     * Output only. The time when the cluster was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. The time when the cluster was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000010); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getUpdateTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. The time when the cluster was last updated.
+     * 
+ * + * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + /** + * + * + *
+     * Output only. The time when the cluster was last updated.
+     * 
+     *
+     * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     */
+    private com.google.protobuf.SingleFieldBuilderV3<
+            com.google.protobuf.Timestamp,
+            com.google.protobuf.Timestamp.Builder,
+            com.google.protobuf.TimestampOrBuilder>
+        getUpdateTimeFieldBuilder() {
+      if (updateTimeBuilder_ == null) {
+        updateTimeBuilder_ =
+            new com.google.protobuf.SingleFieldBuilderV3<
+                com.google.protobuf.Timestamp,
+                com.google.protobuf.Timestamp.Builder,
+                com.google.protobuf.TimestampOrBuilder>(
+                getUpdateTime(), getParentForChildren(), isClean());
+        updateTime_ = null;
+      }
+      return updateTimeBuilder_;
+    }
+
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String> labels_;
+
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String> internalGetLabels() {
+      if (labels_ == null) {
+        return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry);
+      }
+      return labels_;
+    }
+
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+        internalGetMutableLabels() {
+      if (labels_ == null) {
+        labels_ = com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry);
+      }
+      if (!labels_.isMutable()) {
+        labels_ = labels_.copy();
+      }
+      bitField0_ |= 0x00000020;
+      onChanged();
+      return labels_;
+    }
+
+    public int getLabelsCount() {
+      return internalGetLabels().getMap().size();
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Labels as key value pairs.
+     * 
+     *
+     * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL];
+     */
+    @java.lang.Override
+    public boolean containsLabels(java.lang.String key) {
+      if (key == null) {
+        throw new NullPointerException("map key");
+      }
+      return internalGetLabels().getMap().containsKey(key);
+    }
+    /** Use {@link #getLabelsMap()} instead. */
+    @java.lang.Override
+    @java.lang.Deprecated
+    public java.util.Map<java.lang.String, java.lang.String> getLabels() {
+      return getLabelsMap();
+    }
+    /**
+     *
+     *
+     *
+     * Optional. Labels as key-value pairs.
+     * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + * + * + *
+     * Optional. Labels as key-value pairs.
+     * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+     * Optional. Labels as key-value pairs.
+     * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + bitField0_ = (bitField0_ & ~0x00000020); + internalGetMutableLabels().getMutableMap().clear(); + return this; + } + /** + * + * + *
+     * Optional. Labels as key-value pairs.
+     * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder removeLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableLabels().getMutableMap().remove(key); + return this; + } + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableLabels() { + bitField0_ |= 0x00000020; + return internalGetMutableLabels().getMutableMap(); + } + /** + * + * + *
+     * Optional. Labels as key-value pairs.
+     * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder putLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableLabels().getMutableMap().put(key, value); + bitField0_ |= 0x00000020; + return this; + } + /** + * + * + *
+     * Optional. Labels as key-value pairs.
+     * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder putAllLabels(java.util.Map values) { + internalGetMutableLabels().getMutableMap().putAll(values); + bitField0_ |= 0x00000020; + return this; + } + + private com.google.cloud.managedkafka.v1.CapacityConfig capacityConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.CapacityConfig, + com.google.cloud.managedkafka.v1.CapacityConfig.Builder, + com.google.cloud.managedkafka.v1.CapacityConfigOrBuilder> + capacityConfigBuilder_; + /** + * + * + *
+     * Required. Capacity configuration for the Kafka Connect cluster.
+     * 
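+     * A minimal sizing sketch (illustrative values; vcpuCount and memoryBytes
+     * are assumed to be the CapacityConfig fields):
+     *
+     *   builder.setCapacityConfig(
+     *       com.google.cloud.managedkafka.v1.CapacityConfig.newBuilder()
+     *           .setVcpuCount(12)
+     *           .setMemoryBytes(12884901888L) // 12 GiB
+     *           .build());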
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the capacityConfig field is set. + */ + public boolean hasCapacityConfig() { + return ((bitField0_ & 0x00000040) != 0); + } + /** + * + * + *
+     * Required. Capacity configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The capacityConfig. + */ + public com.google.cloud.managedkafka.v1.CapacityConfig getCapacityConfig() { + if (capacityConfigBuilder_ == null) { + return capacityConfig_ == null + ? com.google.cloud.managedkafka.v1.CapacityConfig.getDefaultInstance() + : capacityConfig_; + } else { + return capacityConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Required. Capacity configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setCapacityConfig(com.google.cloud.managedkafka.v1.CapacityConfig value) { + if (capacityConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + capacityConfig_ = value; + } else { + capacityConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Capacity configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setCapacityConfig( + com.google.cloud.managedkafka.v1.CapacityConfig.Builder builderForValue) { + if (capacityConfigBuilder_ == null) { + capacityConfig_ = builderForValue.build(); + } else { + capacityConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Capacity configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeCapacityConfig(com.google.cloud.managedkafka.v1.CapacityConfig value) { + if (capacityConfigBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && capacityConfig_ != null + && capacityConfig_ + != com.google.cloud.managedkafka.v1.CapacityConfig.getDefaultInstance()) { + getCapacityConfigBuilder().mergeFrom(value); + } else { + capacityConfig_ = value; + } + } else { + capacityConfigBuilder_.mergeFrom(value); + } + if (capacityConfig_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + /** + * + * + *
+     * Required. Capacity configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearCapacityConfig() { + bitField0_ = (bitField0_ & ~0x00000040); + capacityConfig_ = null; + if (capacityConfigBuilder_ != null) { + capacityConfigBuilder_.dispose(); + capacityConfigBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Capacity configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.CapacityConfig.Builder getCapacityConfigBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getCapacityConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Required. Capacity configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.CapacityConfigOrBuilder getCapacityConfigOrBuilder() { + if (capacityConfigBuilder_ != null) { + return capacityConfigBuilder_.getMessageOrBuilder(); + } else { + return capacityConfig_ == null + ? com.google.cloud.managedkafka.v1.CapacityConfig.getDefaultInstance() + : capacityConfig_; + } + } + /** + * + * + *
+     * Required. Capacity configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.CapacityConfig, + com.google.cloud.managedkafka.v1.CapacityConfig.Builder, + com.google.cloud.managedkafka.v1.CapacityConfigOrBuilder> + getCapacityConfigFieldBuilder() { + if (capacityConfigBuilder_ == null) { + capacityConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.CapacityConfig, + com.google.cloud.managedkafka.v1.CapacityConfig.Builder, + com.google.cloud.managedkafka.v1.CapacityConfigOrBuilder>( + getCapacityConfig(), getParentForChildren(), isClean()); + capacityConfig_ = null; + } + return capacityConfigBuilder_; + } + + private int state_ = 0; + /** + * + * + *
+     * Output only. The current state of the cluster.
+     * 
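+     * A minimal check sketch (`cluster` is an assumed instance; ACTIVE is an
+     * assumed value of the State enum):
+     *
+     *   boolean ready =
+     *       cluster.getState()
+     *           == com.google.cloud.managedkafka.v1.ConnectCluster.State.ACTIVE;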
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster.State state = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + /** + * + * + *
+     * Output only. The current state of the cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster.State state = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for state to set. + * @return This builder for chaining. + */ + public Builder setStateValue(int value) { + state_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The current state of the cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster.State state = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectCluster.State getState() { + com.google.cloud.managedkafka.v1.ConnectCluster.State result = + com.google.cloud.managedkafka.v1.ConnectCluster.State.forNumber(state_); + return result == null + ? com.google.cloud.managedkafka.v1.ConnectCluster.State.UNRECOGNIZED + : result; + } + /** + * + * + *
+     * Output only. The current state of the cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster.State state = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState(com.google.cloud.managedkafka.v1.ConnectCluster.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000080; + state_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. The current state of the cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster.State state = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000080); + state_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.MapField config_; + + private com.google.protobuf.MapField internalGetConfig() { + if (config_ == null) { + return com.google.protobuf.MapField.emptyMapField(ConfigDefaultEntryHolder.defaultEntry); + } + return config_; + } + + private com.google.protobuf.MapField + internalGetMutableConfig() { + if (config_ == null) { + config_ = com.google.protobuf.MapField.newMapField(ConfigDefaultEntryHolder.defaultEntry); + } + if (!config_.isMutable()) { + config_ = config_.copy(); + } + bitField0_ |= 0x00000100; + onChanged(); + return config_; + } + + public int getConfigCount() { + return internalGetConfig().getMap().size(); + } + /** + * + * + *
+     * Optional. Configurations for the worker that are overridden from the
+     * defaults. The key of the map is a Kafka Connect worker property name, for
+     * example: `exactly.once.source.support`.
+     * 
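+     * A minimal override sketch (`builder` is an assumed instance; "enabled"
+     * is one value Kafka Connect accepts for this property):
+     *
+     *   builder.putConfig("exactly.once.source.support", "enabled");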
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public boolean containsConfig(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetConfig().getMap().containsKey(key); + } + /** Use {@link #getConfigMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getConfig() { + return getConfigMap(); + } + /** + * + * + *
+     * Optional. Configurations for the worker that are overridden from the
+     * defaults. The key of the map is a Kafka Connect worker property name, for
+     * example: `exactly.once.source.support`.
+     * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.util.Map getConfigMap() { + return internalGetConfig().getMap(); + } + /** + * + * + *
+     * Optional. Configurations for the worker that are overridden from the
+     * defaults. The key of the map is a Kafka Connect worker property name, for
+     * example: `exactly.once.source.support`.
+     * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public /* nullable */ java.lang.String getConfigOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetConfig().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+     * Optional. Configurations for the worker that are overridden from the
+     * defaults. The key of the map is a Kafka Connect worker property name, for
+     * example: `exactly.once.source.support`.
+     * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getConfigOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetConfig().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearConfig() { + bitField0_ = (bitField0_ & ~0x00000100); + internalGetMutableConfig().getMutableMap().clear(); + return this; + } + /** + * + * + *
+     * Optional. Configurations for the worker that are overridden from the
+     * defaults. The key of the map is a Kafka Connect worker property name, for
+     * example: `exactly.once.source.support`.
+     * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder removeConfig(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableConfig().getMutableMap().remove(key); + return this; + } + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableConfig() { + bitField0_ |= 0x00000100; + return internalGetMutableConfig().getMutableMap(); + } + /** + * + * + *
+     * Optional. Configurations for the worker that are overridden from the
+     * defaults. The key of the map is a Kafka Connect worker property name, for
+     * example: `exactly.once.source.support`.
+     * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder putConfig(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableConfig().getMutableMap().put(key, value); + bitField0_ |= 0x00000100; + return this; + } + /** + * + * + *
+     * Optional. Configurations for the worker that are overridden from the
+     * defaults. The key of the map is a Kafka Connect worker property name, for
+     * example: `exactly.once.source.support`.
+     * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder putAllConfig(java.util.Map values) { + internalGetMutableConfig().getMutableMap().putAll(values); + bitField0_ |= 0x00000100; + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ConnectCluster) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ConnectCluster) + private static final com.google.cloud.managedkafka.v1.ConnectCluster DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ConnectCluster(); + } + + public static com.google.cloud.managedkafka.v1.ConnectCluster getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ConnectCluster parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectCluster getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectClusterName.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectClusterName.java new file mode 100644 index 000000000000..5069ea45fddc --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectClusterName.java @@ -0,0 +1,227 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.managedkafka.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class ConnectClusterName implements ResourceName { + private static final PathTemplate PROJECT_LOCATION_CONNECT_CLUSTER = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/connectClusters/{connect_cluster}"); + private volatile Map fieldValuesMap; + private final String project; + private final String location; + private final String connectCluster; + + @Deprecated + protected ConnectClusterName() { + project = null; + location = null; + connectCluster = null; + } + + private ConnectClusterName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + location = Preconditions.checkNotNull(builder.getLocation()); + connectCluster = Preconditions.checkNotNull(builder.getConnectCluster()); + } + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getConnectCluster() { + return connectCluster; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ConnectClusterName of(String project, String location, String connectCluster) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setConnectCluster(connectCluster) + .build(); + } + + public static String format(String project, String location, String connectCluster) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setConnectCluster(connectCluster) + .build() + .toString(); + } + + public static ConnectClusterName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_LOCATION_CONNECT_CLUSTER.validatedMatch( + formattedString, "ConnectClusterName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("location"), matchMap.get("connect_cluster")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ConnectClusterName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_LOCATION_CONNECT_CLUSTER.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (location != null) { + fieldMapBuilder.put("location", location); + } + if (connectCluster != null) { + fieldMapBuilder.put("connect_cluster", connectCluster); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String 
getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_LOCATION_CONNECT_CLUSTER.instantiate( + "project", project, "location", location, "connect_cluster", connectCluster); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ConnectClusterName that = ((ConnectClusterName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.location, that.location) + && Objects.equals(this.connectCluster, that.connectCluster); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(location); + h *= 1000003; + h ^= Objects.hashCode(connectCluster); + return h; + } + + /** Builder for projects/{project}/locations/{location}/connectClusters/{connect_cluster}. */ + public static class Builder { + private String project; + private String location; + private String connectCluster; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getConnectCluster() { + return connectCluster; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setConnectCluster(String connectCluster) { + this.connectCluster = connectCluster; + return this; + } + + private Builder(ConnectClusterName connectClusterName) { + this.project = connectClusterName.project; + this.location = connectClusterName.location; + this.connectCluster = connectClusterName.connectCluster; + } + + public ConnectClusterName build() { + return new ConnectClusterName(this); + } + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectClusterOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectClusterOrBuilder.java new file mode 100644 index 000000000000..873bad02ee39 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectClusterOrBuilder.java @@ -0,0 +1,404 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface ConnectClusterOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ConnectCluster) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Configuration properties for a Kafka Connect cluster deployed
+   * to Google Cloud Platform.
+   * 
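+   * A minimal wiring sketch (illustrative; `accessConfig` is an assumed,
+   * separately built ConnectAccessConfig):
+   *
+   *   ConnectCluster cluster =
+   *       ConnectCluster.newBuilder()
+   *           .setGcpConfig(
+   *               ConnectGcpConfig.newBuilder().setAccessConfig(accessConfig).build())
+   *           .build();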
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the gcpConfig field is set. + */ + boolean hasGcpConfig(); + /** + * + * + *
+   * Required. Configuration properties for a Kafka Connect cluster deployed
+   * to Google Cloud Platform.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The gcpConfig. + */ + com.google.cloud.managedkafka.v1.ConnectGcpConfig getGcpConfig(); + /** + * + * + *
+   * Required. Configuration properties for a Kafka Connect cluster deployed
+   * to Google Cloud Platform.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectGcpConfig gcp_config = 7 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.managedkafka.v1.ConnectGcpConfigOrBuilder getGcpConfigOrBuilder(); + + /** + * + * + *
+   * Identifier. The name of the Kafka Connect cluster. Structured like:
+   * projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id}
+   * 
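+   * A minimal formatting sketch (illustrative identifiers) using the
+   * generated ConnectClusterName helper:
+   *
+   *   String name =
+   *       ConnectClusterName.of("my-project", "us-central1", "my-connect-cluster")
+   *           .toString();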
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Identifier. The name of the Kafka Connect cluster. Structured like:
+   * projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id}
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Required. Immutable. The name of the Kafka cluster this Kafka Connect
+   * cluster is attached to. Structured like:
+   * projects/{project}/locations/{location}/clusters/{cluster}
+   * 
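+   * A minimal sketch (illustrative identifiers; `builder` is an assumed
+   * ConnectCluster.Builder) following the documented format:
+   *
+   *   builder.setKafkaCluster(
+   *       "projects/my-project/locations/us-central1/clusters/my-kafka-cluster");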
+ * + * + * string kafka_cluster = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The kafkaCluster. + */ + java.lang.String getKafkaCluster(); + /** + * + * + *
+   * Required. Immutable. The name of the Kafka cluster this Kafka Connect
+   * cluster is attached to. Structured like:
+   * projects/{project}/locations/{location}/clusters/{cluster}
+   * 
+ * + * + * string kafka_cluster = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The bytes for kafkaCluster. + */ + com.google.protobuf.ByteString getKafkaClusterBytes(); + + /** + * + * + *
+   * Output only. The time when the cluster was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + /** + * + * + *
+   * Output only. The time when the cluster was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + /** + * + * + *
+   * Output only. The time when the cluster was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+   * Output only. The time when the cluster was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + /** + * + * + *
+   * Output only. The time when the cluster was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + /** + * + * + *
+   * Output only. The time when the cluster was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
+   * Optional. Labels as key-value pairs.
+   * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + int getLabelsCount(); + /** + * + * + *
+   * Optional. Labels as key-value pairs.
+   * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + boolean containsLabels(java.lang.String key); + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getLabels(); + /** + * + * + *
+   * Optional. Labels as key-value pairs.
+   * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.util.Map getLabelsMap(); + /** + * + * + *
+   * Optional. Labels as key-value pairs.
+   * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + /* nullable */ + java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + /** + * + * + *
+   * Optional. Labels as key-value pairs.
+   * 
+ * + * map<string, string> labels = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.lang.String getLabelsOrThrow(java.lang.String key); + + /** + * + * + *
+   * Required. Capacity configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the capacityConfig field is set. + */ + boolean hasCapacityConfig(); + /** + * + * + *
+   * Required. Capacity configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The capacityConfig. + */ + com.google.cloud.managedkafka.v1.CapacityConfig getCapacityConfig(); + /** + * + * + *
+   * Required. Capacity configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 6 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.managedkafka.v1.CapacityConfigOrBuilder getCapacityConfigOrBuilder(); + + /** + * + * + *
+   * Output only. The current state of the cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster.State state = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + int getStateValue(); + /** + * + * + *
+   * Output only. The current state of the cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster.State state = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + com.google.cloud.managedkafka.v1.ConnectCluster.State getState(); + + /** + * + * + *
+   * Optional. Configurations for the worker that are overridden from the
+   * defaults. The key of the map is a Kafka Connect worker property name, for
+   * example: `exactly.once.source.support`.
+   * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + int getConfigCount(); + /** + * + * + *
+   * Optional. Configurations for the worker that are overridden from the
+   * defaults. The key of the map is a Kafka Connect worker property name, for
+   * example: `exactly.once.source.support`.
+   * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + boolean containsConfig(java.lang.String key); + /** Use {@link #getConfigMap()} instead. */ + @java.lang.Deprecated + java.util.Map getConfig(); + /** + * + * + *
+   * Optional. Configurations for the worker that are overridden from the
+   * defaults. The key of the map is a Kafka Connect worker property name, for
+   * example: `exactly.once.source.support`.
+   * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.util.Map getConfigMap(); + /** + * + * + *
+   * Optional. Configurations for the worker that are overridden from the
+   * defaults. The key of the map is a Kafka Connect worker property name, for
+   * example: `exactly.once.source.support`.
+   * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + /* nullable */ + java.lang.String getConfigOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + /** + * + * + *
+   * Optional. Configurations for the worker that are overridden from the
+   * defaults. The key of the map is a Kafka Connect worker property name, for
+   * example: `exactly.once.source.support`.
+   * 
+ * + * map<string, string> config = 9 [(.google.api.field_behavior) = OPTIONAL]; + */ + java.lang.String getConfigOrThrow(java.lang.String key); + + com.google.cloud.managedkafka.v1.ConnectCluster.PlatformConfigCase getPlatformConfigCase(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectGcpConfig.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectGcpConfig.java new file mode 100644 index 000000000000..b4f2b5acc322 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectGcpConfig.java @@ -0,0 +1,1094 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * Configuration properties for a Kafka Connect cluster deployed to Google Cloud
+ * Platform.
+ * 
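+ * A minimal construction sketch (illustrative values; `accessConfig` is an
+ * assumed, separately built ConnectAccessConfig, and the secret path follows
+ * the format documented on secret_paths below):
+ *
+ *   ConnectGcpConfig gcpConfig =
+ *       ConnectGcpConfig.newBuilder()
+ *           .setAccessConfig(accessConfig)
+ *           .addSecretPaths("projects/my-project/secrets/my-secret/versions/1")
+ *           .build();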
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ConnectGcpConfig} + */ +public final class ConnectGcpConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ConnectGcpConfig) + ConnectGcpConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use ConnectGcpConfig.newBuilder() to construct. + private ConnectGcpConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ConnectGcpConfig() { + secretPaths_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ConnectGcpConfig(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ConnectGcpConfig.class, + com.google.cloud.managedkafka.v1.ConnectGcpConfig.Builder.class); + } + + private int bitField0_; + public static final int ACCESS_CONFIG_FIELD_NUMBER = 1; + private com.google.cloud.managedkafka.v1.ConnectAccessConfig accessConfig_; + /** + * + * + *
+   * Required. Access configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the accessConfig field is set. + */ + @java.lang.Override + public boolean hasAccessConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
+   * Required. Access configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The accessConfig. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectAccessConfig getAccessConfig() { + return accessConfig_ == null + ? com.google.cloud.managedkafka.v1.ConnectAccessConfig.getDefaultInstance() + : accessConfig_; + } + /** + * + * + *
+   * Required. Access configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectAccessConfigOrBuilder getAccessConfigOrBuilder() { + return accessConfig_ == null + ? com.google.cloud.managedkafka.v1.ConnectAccessConfig.getDefaultInstance() + : accessConfig_; + } + + public static final int SECRET_PATHS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList secretPaths_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + /** + * + * + *
+   * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+   * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+   * be loaded into one cluster. Format:
+   * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+   * 
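+   * A minimal sketch (illustrative path; `gcpConfigBuilder` is an assumed
+   * ConnectGcpConfig.Builder). Note the version must be a concrete number,
+   * not an alias such as "latest":
+   *
+   *   gcpConfigBuilder.addSecretPaths(
+   *       "projects/my-project/secrets/connect-credentials/versions/3");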
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the secretPaths. + */ + public com.google.protobuf.ProtocolStringList getSecretPathsList() { + return secretPaths_; + } + /** + * + * + *
+   * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+   * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+   * be loaded into one cluster. Format:
+   * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+   * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of secretPaths. + */ + public int getSecretPathsCount() { + return secretPaths_.size(); + } + /** + * + * + *
+   * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+   * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+   * be loaded into one cluster. Format:
+   * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+   * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The secretPaths at the given index. + */ + public java.lang.String getSecretPaths(int index) { + return secretPaths_.get(index); + } + /** + * + * + *
+   * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+   * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+   * be loaded into one cluster. Format:
+   * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+   * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the secretPaths at the given index. + */ + public com.google.protobuf.ByteString getSecretPathsBytes(int index) { + return secretPaths_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getAccessConfig()); + } + for (int i = 0; i < secretPaths_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, secretPaths_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getAccessConfig()); + } + { + int dataSize = 0; + for (int i = 0; i < secretPaths_.size(); i++) { + dataSize += computeStringSizeNoTag(secretPaths_.getRaw(i)); + } + size += dataSize; + size += 1 * getSecretPathsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.ConnectGcpConfig)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.ConnectGcpConfig other = + (com.google.cloud.managedkafka.v1.ConnectGcpConfig) obj; + + if (hasAccessConfig() != other.hasAccessConfig()) return false; + if (hasAccessConfig()) { + if (!getAccessConfig().equals(other.getAccessConfig())) return false; + } + if (!getSecretPathsList().equals(other.getSecretPathsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAccessConfig()) { + hash = (37 * hash) + ACCESS_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getAccessConfig().hashCode(); + } + if (getSecretPathsCount() > 0) { + hash = (37 * hash) + SECRET_PATHS_FIELD_NUMBER; + hash = (53 * hash) + getSecretPathsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.managedkafka.v1.ConnectGcpConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Configuration properties for a Kafka Connect cluster deployed to Google Cloud
+   * Platform.
+   * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ConnectGcpConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ConnectGcpConfig) + com.google.cloud.managedkafka.v1.ConnectGcpConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ConnectGcpConfig.class, + com.google.cloud.managedkafka.v1.ConnectGcpConfig.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.ConnectGcpConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getAccessConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + accessConfig_ = null; + if (accessConfigBuilder_ != null) { + accessConfigBuilder_.dispose(); + accessConfigBuilder_ = null; + } + secretPaths_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectGcpConfig getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.ConnectGcpConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectGcpConfig build() { + com.google.cloud.managedkafka.v1.ConnectGcpConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectGcpConfig buildPartial() { + com.google.cloud.managedkafka.v1.ConnectGcpConfig result = + new com.google.cloud.managedkafka.v1.ConnectGcpConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.ConnectGcpConfig result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.accessConfig_ = + accessConfigBuilder_ == null ? 
accessConfig_ : accessConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + secretPaths_.makeImmutable(); + result.secretPaths_ = secretPaths_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.ConnectGcpConfig) { + return mergeFrom((com.google.cloud.managedkafka.v1.ConnectGcpConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.ConnectGcpConfig other) { + if (other == com.google.cloud.managedkafka.v1.ConnectGcpConfig.getDefaultInstance()) + return this; + if (other.hasAccessConfig()) { + mergeAccessConfig(other.getAccessConfig()); + } + if (!other.secretPaths_.isEmpty()) { + if (secretPaths_.isEmpty()) { + secretPaths_ = other.secretPaths_; + bitField0_ |= 0x00000002; + } else { + ensureSecretPathsIsMutable(); + secretPaths_.addAll(other.secretPaths_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getAccessConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureSecretPathsIsMutable(); + secretPaths_.add(s); + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.cloud.managedkafka.v1.ConnectAccessConfig accessConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectAccessConfig, + com.google.cloud.managedkafka.v1.ConnectAccessConfig.Builder, + 
com.google.cloud.managedkafka.v1.ConnectAccessConfigOrBuilder> + accessConfigBuilder_; + /** + * + * + *
+     * Required. Access configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the accessConfig field is set. + */ + public boolean hasAccessConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
+     * Required. Access configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The accessConfig. + */ + public com.google.cloud.managedkafka.v1.ConnectAccessConfig getAccessConfig() { + if (accessConfigBuilder_ == null) { + return accessConfig_ == null + ? com.google.cloud.managedkafka.v1.ConnectAccessConfig.getDefaultInstance() + : accessConfig_; + } else { + return accessConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Required. Access configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAccessConfig(com.google.cloud.managedkafka.v1.ConnectAccessConfig value) { + if (accessConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + accessConfig_ = value; + } else { + accessConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Access configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAccessConfig( + com.google.cloud.managedkafka.v1.ConnectAccessConfig.Builder builderForValue) { + if (accessConfigBuilder_ == null) { + accessConfig_ = builderForValue.build(); + } else { + accessConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Access configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeAccessConfig(com.google.cloud.managedkafka.v1.ConnectAccessConfig value) { + if (accessConfigBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && accessConfig_ != null + && accessConfig_ + != com.google.cloud.managedkafka.v1.ConnectAccessConfig.getDefaultInstance()) { + getAccessConfigBuilder().mergeFrom(value); + } else { + accessConfig_ = value; + } + } else { + accessConfigBuilder_.mergeFrom(value); + } + if (accessConfig_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + /** + * + * + *
+     * Required. Access configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearAccessConfig() { + bitField0_ = (bitField0_ & ~0x00000001); + accessConfig_ = null; + if (accessConfigBuilder_ != null) { + accessConfigBuilder_.dispose(); + accessConfigBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Access configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.ConnectAccessConfig.Builder getAccessConfigBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getAccessConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Required. Access configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.ConnectAccessConfigOrBuilder + getAccessConfigOrBuilder() { + if (accessConfigBuilder_ != null) { + return accessConfigBuilder_.getMessageOrBuilder(); + } else { + return accessConfig_ == null + ? com.google.cloud.managedkafka.v1.ConnectAccessConfig.getDefaultInstance() + : accessConfig_; + } + } + /** + * + * + *
+     * Required. Access configuration for the Kafka Connect cluster.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectAccessConfig, + com.google.cloud.managedkafka.v1.ConnectAccessConfig.Builder, + com.google.cloud.managedkafka.v1.ConnectAccessConfigOrBuilder> + getAccessConfigFieldBuilder() { + if (accessConfigBuilder_ == null) { + accessConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectAccessConfig, + com.google.cloud.managedkafka.v1.ConnectAccessConfig.Builder, + com.google.cloud.managedkafka.v1.ConnectAccessConfigOrBuilder>( + getAccessConfig(), getParentForChildren(), isClean()); + accessConfig_ = null; + } + return accessConfigBuilder_; + } + + private com.google.protobuf.LazyStringArrayList secretPaths_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureSecretPathsIsMutable() { + if (!secretPaths_.isModifiable()) { + secretPaths_ = new com.google.protobuf.LazyStringArrayList(secretPaths_); + } + bitField0_ |= 0x00000002; + } + /** + * + * + *
+     * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+     * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+     * be loaded into one cluster. Format:
+     * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+     * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the secretPaths. + */ + public com.google.protobuf.ProtocolStringList getSecretPathsList() { + secretPaths_.makeImmutable(); + return secretPaths_; + } + /** + * + * + *
+     * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+     * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+     * be loaded into one cluster. Format:
+     * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+     * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of secretPaths. + */ + public int getSecretPathsCount() { + return secretPaths_.size(); + } + /** + * + * + *
+     * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+     * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+     * be loaded into one cluster. Format:
+     * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+     * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The secretPaths at the given index. + */ + public java.lang.String getSecretPaths(int index) { + return secretPaths_.get(index); + } + /** + * + * + *
+     * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+     * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+     * be loaded into one cluster. Format:
+     * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+     * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the secretPaths at the given index. + */ + public com.google.protobuf.ByteString getSecretPathsBytes(int index) { + return secretPaths_.getByteString(index); + } + /** + * + * + *
+     * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+     * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+     * be loaded into one cluster. Format:
+     * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+     * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The secretPaths to set. + * @return This builder for chaining. + */ + public Builder setSecretPaths(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSecretPathsIsMutable(); + secretPaths_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+     * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+     * be loaded into one cluster. Format:
+     * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+     * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The secretPaths to add. + * @return This builder for chaining. + */ + public Builder addSecretPaths(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSecretPathsIsMutable(); + secretPaths_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+     * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+     * be loaded into one cluster. Format:
+     * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+     * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param values The secretPaths to add. + * @return This builder for chaining. + */ + public Builder addAllSecretPaths(java.lang.Iterable values) { + ensureSecretPathsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, secretPaths_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+     * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+     * be loaded into one cluster. Format:
+     * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+     * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSecretPaths() { + secretPaths_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+     * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+     * be loaded into one cluster. Format:
+     * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+     * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the secretPaths to add. + * @return This builder for chaining. + */ + public Builder addSecretPathsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureSecretPathsIsMutable(); + secretPaths_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ConnectGcpConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ConnectGcpConfig) + private static final com.google.cloud.managedkafka.v1.ConnectGcpConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ConnectGcpConfig(); + } + + public static com.google.cloud.managedkafka.v1.ConnectGcpConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ConnectGcpConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectGcpConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectGcpConfigOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectGcpConfigOrBuilder.java new file mode 100644 index 000000000000..aedfc1eaeef1 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectGcpConfigOrBuilder.java @@ -0,0 +1,138 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface ConnectGcpConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ConnectGcpConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Access configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the accessConfig field is set. + */ + boolean hasAccessConfig(); + /** + * + * + *
+   * Required. Access configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The accessConfig. + */ + com.google.cloud.managedkafka.v1.ConnectAccessConfig getAccessConfig(); + /** + * + * + *
+   * Required. Access configuration for the Kafka Connect cluster.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectAccessConfig access_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.managedkafka.v1.ConnectAccessConfigOrBuilder getAccessConfigOrBuilder(); + + /** + * + * + *
+   * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+   * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+   * be loaded into one cluster. Format:
+   * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+   * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the secretPaths. + */ + java.util.List getSecretPathsList(); + /** + * + * + *
+   * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+   * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+   * be loaded into one cluster. Format:
+   * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+   * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of secretPaths. + */ + int getSecretPathsCount(); + /** + * + * + *
+   * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+   * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+   * be loaded into one cluster. Format:
+   * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+   * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The secretPaths at the given index. + */ + java.lang.String getSecretPaths(int index); + /** + * + * + *
+   * Optional. Secrets to load into workers. Exact SecretVersions from Secret
+   * Manager must be provided -- aliases are not supported. Up to 32 secrets may
+   * be loaded into one cluster. Format:
+   * projects/<project-id>/secrets/<secret-name>/versions/<version-id>
+   * 
+ * + * + * repeated string secret_paths = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the secretPaths at the given index. + */ + com.google.protobuf.ByteString getSecretPathsBytes(int index); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectNetworkConfig.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectNetworkConfig.java new file mode 100644 index 000000000000..5c932cf9b048 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectNetworkConfig.java @@ -0,0 +1,1352 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * The configuration of a Virtual Private Cloud (VPC) network that can access
+ * the Kafka Connect cluster.
+ * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ConnectNetworkConfig} + */ +public final class ConnectNetworkConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ConnectNetworkConfig) + ConnectNetworkConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use ConnectNetworkConfig.newBuilder() to construct. + private ConnectNetworkConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ConnectNetworkConfig() { + primarySubnet_ = ""; + additionalSubnets_ = com.google.protobuf.LazyStringArrayList.emptyList(); + dnsDomainNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ConnectNetworkConfig(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ConnectNetworkConfig.class, + com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder.class); + } + + public static final int PRIMARY_SUBNET_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object primarySubnet_ = ""; + /** + * + * + *
+   * Required. VPC subnet to make available to the Kafka Connect cluster.
+   * Structured like:
+   * projects/{project}/regions/{region}/subnetworks/{subnet_id}
+   *
+   * It is used to create a Private Service Connect (PSC) interface for the
+   * Kafka Connect workers. It must be located in the same region as the
+   * Kafka Connect cluster.
+   *
+   * The CIDR range of the subnet must be within the IPv4 address ranges for
+   * private networks, as specified in RFC 1918. The primary subnet CIDR range
+   * must have a minimum size of /22 (1024 addresses).
+   * 
+ * + * string primary_subnet = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The primarySubnet. + */ + @java.lang.Override + public java.lang.String getPrimarySubnet() { + java.lang.Object ref = primarySubnet_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + primarySubnet_ = s; + return s; + } + } + /** + * + * + *
+   * Required. VPC subnet to make available to the Kafka Connect cluster.
+   * Structured like:
+   * projects/{project}/regions/{region}/subnetworks/{subnet_id}
+   *
+   * It is used to create a Private Service Connect (PSC) interface for the
+   * Kafka Connect workers. It must be located in the same region as the
+   * Kafka Connect cluster.
+   *
+   * The CIDR range of the subnet must be within the IPv4 address ranges for
+   * private networks, as specified in RFC 1918. The primary subnet CIDR range
+   * must have a minimum size of /22 (1024 addresses).
+   * 
+ * + * string primary_subnet = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for primarySubnet. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrimarySubnetBytes() { + java.lang.Object ref = primarySubnet_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + primarySubnet_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ADDITIONAL_SUBNETS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList additionalSubnets_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + /** + * + * + *
+   * Optional. Additional subnets may be specified. They may be in another
+   * region, but must be in the same VPC network. The Connect workers can
+   * communicate with network endpoints in either the primary or additional
+   * subnets.
+   * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the additionalSubnets. + */ + public com.google.protobuf.ProtocolStringList getAdditionalSubnetsList() { + return additionalSubnets_; + } + /** + * + * + *
+   * Optional. Additional subnets may be specified. They may be in another
+   * region, but must be in the same VPC network. The Connect workers can
+   * communicate with network endpoints in either the primary or additional
+   * subnets.
+   * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of additionalSubnets. + */ + public int getAdditionalSubnetsCount() { + return additionalSubnets_.size(); + } + /** + * + * + *
+   * Optional. Additional subnets may be specified. They may be in another
+   * region, but must be in the same VPC network. The Connect workers can
+   * communicate with network endpoints in either the primary or additional
+   * subnets.
+   * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The additionalSubnets at the given index. + */ + public java.lang.String getAdditionalSubnets(int index) { + return additionalSubnets_.get(index); + } + /** + * + * + *
+   * Optional. Additional subnets may be specified. They may be in another
+   * region, but must be in the same VPC network. The Connect workers can
+   * communicate with network endpoints in either the primary or additional
+   * subnets.
+   * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the additionalSubnets at the given index. + */ + public com.google.protobuf.ByteString getAdditionalSubnetsBytes(int index) { + return additionalSubnets_.getByteString(index); + } + + public static final int DNS_DOMAIN_NAMES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList dnsDomainNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + /** + * + * + *
+   * Optional. Additional DNS domain names from the subnet's network to be made
+   * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+   * name of the target cluster's bootstrap address must be added to make it
+   * visible to the connector. For example:
+   * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+   * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the dnsDomainNames. + */ + public com.google.protobuf.ProtocolStringList getDnsDomainNamesList() { + return dnsDomainNames_; + } + /** + * + * + *
+   * Optional. Additional DNS domain names from the subnet's network to be made
+   * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+   * name of the target cluster's bootstrap address must be added to make it
+   * visible to the connector. For example:
+   * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+   * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of dnsDomainNames. + */ + public int getDnsDomainNamesCount() { + return dnsDomainNames_.size(); + } + /** + * + * + *
+   * Optional. Additional DNS domain names from the subnet's network to be made
+   * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+   * name of the target cluster's bootstrap address must be added to make it
+   * visible to the connector. For example:
+   * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+   * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The dnsDomainNames at the given index. + */ + public java.lang.String getDnsDomainNames(int index) { + return dnsDomainNames_.get(index); + } + /** + * + * + *
+   * Optional. Additional DNS domain names from the subnet's network to be made
+   * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+   * name of the target cluster's bootstrap address must be added to make it
+   * visible to the connector. For example:
+   * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+   * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the dnsDomainNames at the given index. + */ + public com.google.protobuf.ByteString getDnsDomainNamesBytes(int index) { + return dnsDomainNames_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < dnsDomainNames_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, dnsDomainNames_.getRaw(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(primarySubnet_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, primarySubnet_); + } + for (int i = 0; i < additionalSubnets_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, additionalSubnets_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < dnsDomainNames_.size(); i++) { + dataSize += computeStringSizeNoTag(dnsDomainNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getDnsDomainNamesList().size(); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(primarySubnet_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, primarySubnet_); + } + { + int dataSize = 0; + for (int i = 0; i < additionalSubnets_.size(); i++) { + dataSize += computeStringSizeNoTag(additionalSubnets_.getRaw(i)); + } + size += dataSize; + size += 1 * getAdditionalSubnetsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.ConnectNetworkConfig)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.ConnectNetworkConfig other = + (com.google.cloud.managedkafka.v1.ConnectNetworkConfig) obj; + + if (!getPrimarySubnet().equals(other.getPrimarySubnet())) return false; + if (!getAdditionalSubnetsList().equals(other.getAdditionalSubnetsList())) return false; + if (!getDnsDomainNamesList().equals(other.getDnsDomainNamesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PRIMARY_SUBNET_FIELD_NUMBER; + hash = (53 * hash) + getPrimarySubnet().hashCode(); + if (getAdditionalSubnetsCount() > 0) { + hash = (37 * hash) + ADDITIONAL_SUBNETS_FIELD_NUMBER; + hash = (53 * hash) + getAdditionalSubnetsList().hashCode(); + } + if (getDnsDomainNamesCount() > 0) { + hash = (37 * hash) + DNS_DOMAIN_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getDnsDomainNamesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.ConnectNetworkConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + 
public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * The configuration of a Virtual Private Cloud (VPC) network that can access
+   * the Kafka Connect cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ConnectNetworkConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ConnectNetworkConfig) + com.google.cloud.managedkafka.v1.ConnectNetworkConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ConnectNetworkConfig.class, + com.google.cloud.managedkafka.v1.ConnectNetworkConfig.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.ConnectNetworkConfig.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + primarySubnet_ = ""; + additionalSubnets_ = com.google.protobuf.LazyStringArrayList.emptyList(); + dnsDomainNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectNetworkConfig getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.ConnectNetworkConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectNetworkConfig build() { + com.google.cloud.managedkafka.v1.ConnectNetworkConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectNetworkConfig buildPartial() { + com.google.cloud.managedkafka.v1.ConnectNetworkConfig result = + new com.google.cloud.managedkafka.v1.ConnectNetworkConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.ConnectNetworkConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.primarySubnet_ = primarySubnet_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + additionalSubnets_.makeImmutable(); + result.additionalSubnets_ = additionalSubnets_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + dnsDomainNames_.makeImmutable(); + result.dnsDomainNames_ = dnsDomainNames_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.ConnectNetworkConfig) { + return mergeFrom((com.google.cloud.managedkafka.v1.ConnectNetworkConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.ConnectNetworkConfig other) { + if (other == com.google.cloud.managedkafka.v1.ConnectNetworkConfig.getDefaultInstance()) + return this; + if (!other.getPrimarySubnet().isEmpty()) { + primarySubnet_ = other.primarySubnet_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.additionalSubnets_.isEmpty()) { + if (additionalSubnets_.isEmpty()) { + additionalSubnets_ = other.additionalSubnets_; + bitField0_ |= 0x00000002; + } else { + ensureAdditionalSubnetsIsMutable(); + additionalSubnets_.addAll(other.additionalSubnets_); + } + onChanged(); + } + if (!other.dnsDomainNames_.isEmpty()) { + if (dnsDomainNames_.isEmpty()) { + dnsDomainNames_ = other.dnsDomainNames_; + bitField0_ |= 0x00000004; + } else { + ensureDnsDomainNamesIsMutable(); + dnsDomainNames_.addAll(other.dnsDomainNames_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureDnsDomainNamesIsMutable(); + dnsDomainNames_.add(s); + break; + } // case 18 + case 26: + { + primarySubnet_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 26 + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureAdditionalSubnetsIsMutable(); + additionalSubnets_.add(s); + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object primarySubnet_ = ""; + /** + * + * + *
+     * Required. VPC subnet to make available to the Kafka Connect cluster.
+     * Structured like:
+     * projects/{project}/regions/{region}/subnetworks/{subnet_id}
+     *
+     * It is used to create a Private Service Connect (PSC) interface for the
+     * Kafka Connect workers. It must be located in the same region as the
+     * Kafka Connect cluster.
+     *
+     * The CIDR range of the subnet must be within the IPv4 address ranges for
+     * private networks, as specified in RFC 1918. The primary subnet CIDR range
+     * must have a minimum size of /22 (1024 addresses).
+     * 
+ * + * string primary_subnet = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The primarySubnet. + */ + public java.lang.String getPrimarySubnet() { + java.lang.Object ref = primarySubnet_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + primarySubnet_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. VPC subnet to make available to the Kafka Connect cluster.
+     * Structured like:
+     * projects/{project}/regions/{region}/subnetworks/{subnet_id}
+     *
+     * It is used to create a Private Service Connect (PSC) interface for the
+     * Kafka Connect workers. It must be located in the same region as the
+     * Kafka Connect cluster.
+     *
+     * The CIDR range of the subnet must be within the IPv4 address ranges for
+     * private networks, as specified in RFC 1918. The primary subnet CIDR range
+     * must have a minimum size of /22 (1024 addresses).
+     * 
+ * + * string primary_subnet = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for primarySubnet. + */ + public com.google.protobuf.ByteString getPrimarySubnetBytes() { + java.lang.Object ref = primarySubnet_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + primarySubnet_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. VPC subnet to make available to the Kafka Connect cluster.
+     * Structured like:
+     * projects/{project}/regions/{region}/subnetworks/{subnet_id}
+     *
+     * It is used to create a Private Service Connect (PSC) interface for the
+     * Kafka Connect workers. It must be located in the same region as the
+     * Kafka Connect cluster.
+     *
+     * The CIDR range of the subnet must be within the IPv4 address ranges for
+     * private networks, as specified in RFC 1918. The primary subnet CIDR range
+     * must have a minimum size of /22 (1024 addresses).
+     * 
+ * + * string primary_subnet = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The primarySubnet to set. + * @return This builder for chaining. + */ + public Builder setPrimarySubnet(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + primarySubnet_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. VPC subnet to make available to the Kafka Connect cluster.
+     * Structured like:
+     * projects/{project}/regions/{region}/subnetworks/{subnet_id}
+     *
+     * It is used to create a Private Service Connect (PSC) interface for the
+     * Kafka Connect workers. It must be located in the same region as the
+     * Kafka Connect cluster.
+     *
+     * The CIDR range of the subnet must be within the IPv4 address ranges for
+     * private networks, as specified in RFC 1918. The primary subnet CIDR range
+     * must have a minimum size of /22 (1024 addresses).
+     * 
+ * + * string primary_subnet = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearPrimarySubnet() { + primarySubnet_ = getDefaultInstance().getPrimarySubnet(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. VPC subnet to make available to the Kafka Connect cluster.
+     * Structured like:
+     * projects/{project}/regions/{region}/subnetworks/{subnet_id}
+     *
+     * It is used to create a Private Service Connect (PSC) interface for the
+     * Kafka Connect workers. It must be located in the same region as the
+     * Kafka Connect cluster.
+     *
+     * The CIDR range of the subnet must be within the IPv4 address ranges for
+     * private networks, as specified in RFC 1918. The primary subnet CIDR range
+     * must have a minimum size of /22 (1024 addresses).
+     * 
+ * + * string primary_subnet = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for primarySubnet to set. + * @return This builder for chaining. + */ + public Builder setPrimarySubnetBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + primarySubnet_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList additionalSubnets_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureAdditionalSubnetsIsMutable() { + if (!additionalSubnets_.isModifiable()) { + additionalSubnets_ = new com.google.protobuf.LazyStringArrayList(additionalSubnets_); + } + bitField0_ |= 0x00000002; + } + /** + * + * + *
+     * Optional. Additional subnets may be specified. They may be in another
+     * region, but must be in the same VPC network. The Connect workers can
+     * communicate with network endpoints in either the primary or additional
+     * subnets.
+     * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return A list containing the additionalSubnets. + */ + public com.google.protobuf.ProtocolStringList getAdditionalSubnetsList() { + additionalSubnets_.makeImmutable(); + return additionalSubnets_; + } + /** + * + * + *
+     * Optional. Additional subnets may be specified. They may be in another
+     * region, but must be in the same VPC network. The Connect workers can
+     * communicate with network endpoints in either the primary or additional
+     * subnets.
+     * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The count of additionalSubnets. + */ + public int getAdditionalSubnetsCount() { + return additionalSubnets_.size(); + } + /** + * + * + *
+     * Optional. Additional subnets may be specified. They may be in another
+     * region, but must be in the same VPC network. The Connect workers can
+     * communicate with network endpoints in either the primary or additional
+     * subnets.
+     * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the element to return. + * @return The additionalSubnets at the given index. + */ + public java.lang.String getAdditionalSubnets(int index) { + return additionalSubnets_.get(index); + } + /** + * + * + *
+     * Optional. Additional subnets may be specified. They may be in another
+     * region, but must be in the same VPC network. The Connect workers can
+     * communicate with network endpoints in either the primary or additional
+     * subnets.
+     * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index of the value to return. + * @return The bytes of the additionalSubnets at the given index. + */ + public com.google.protobuf.ByteString getAdditionalSubnetsBytes(int index) { + return additionalSubnets_.getByteString(index); + } + /** + * + * + *
+     * Optional. Additional subnets may be specified. They may be in another
+     * region, but must be in the same VPC network. The Connect workers can
+     * communicate with network endpoints in either the primary or additional
+     * subnets.
+     * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param index The index to set the value at. + * @param value The additionalSubnets to set. + * @return This builder for chaining. + */ + public Builder setAdditionalSubnets(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAdditionalSubnetsIsMutable(); + additionalSubnets_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Additional subnets may be specified. They may be in another
+     * region, but must be in the same VPC network. The Connect workers can
+     * communicate with network endpoints in either the primary or additional
+     * subnets.
+     * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The additionalSubnets to add. + * @return This builder for chaining. + */ + public Builder addAdditionalSubnets(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAdditionalSubnetsIsMutable(); + additionalSubnets_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Additional subnets may be specified. They may be in another
+     * region, but must be in the same VPC network. The Connect workers can
+     * communicate with network endpoints in either the primary or additional
+     * subnets.
+     * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param values The additionalSubnets to add. + * @return This builder for chaining. + */ + public Builder addAllAdditionalSubnets(java.lang.Iterable values) { + ensureAdditionalSubnetsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, additionalSubnets_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Additional subnets may be specified. They may be in another
+     * region, but must be in the same VPC network. The Connect workers can
+     * communicate with network endpoints in either the primary or additional
+     * subnets.
+     * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearAdditionalSubnets() { + additionalSubnets_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Additional subnets may be specified. They may be in another
+     * region, but must be in the same VPC network. The Connect workers can
+     * communicate with network endpoints in either the primary or additional
+     * subnets.
+     * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The bytes of the additionalSubnets to add. + * @return This builder for chaining. + */ + public Builder addAdditionalSubnetsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureAdditionalSubnetsIsMutable(); + additionalSubnets_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList dnsDomainNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureDnsDomainNamesIsMutable() { + if (!dnsDomainNames_.isModifiable()) { + dnsDomainNames_ = new com.google.protobuf.LazyStringArrayList(dnsDomainNames_); + } + bitField0_ |= 0x00000004; + } + /** + * + * + *
+     * Optional. Additional DNS domain names from the subnet's network to be made
+     * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+     * name of the target cluster's bootstrap address must be added to make it
+     * visible to the connector. For example:
+     * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+     * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the dnsDomainNames. + */ + public com.google.protobuf.ProtocolStringList getDnsDomainNamesList() { + dnsDomainNames_.makeImmutable(); + return dnsDomainNames_; + } + /** + * + * + *
+     * Optional. Additional DNS domain names from the subnet's network to be made
+     * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+     * name of the target cluster's bootstrap address must be added to make it
+     * visible to the connector. For example:
+     * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+     * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of dnsDomainNames. + */ + public int getDnsDomainNamesCount() { + return dnsDomainNames_.size(); + } + /** + * + * + *
+     * Optional. Additional DNS domain names from the subnet's network to be made
+     * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+     * name of the target cluster's bootstrap address must be added to make it
+     * visible to the connector. For example:
+     * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+     * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The dnsDomainNames at the given index. + */ + public java.lang.String getDnsDomainNames(int index) { + return dnsDomainNames_.get(index); + } + /** + * + * + *
+     * Optional. Additional DNS domain names from the subnet's network to be made
+     * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+     * name of the target cluster's bootstrap address must be added to make it
+     * visible to the connector. For example:
+     * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+     * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the dnsDomainNames at the given index. + */ + public com.google.protobuf.ByteString getDnsDomainNamesBytes(int index) { + return dnsDomainNames_.getByteString(index); + } + /** + * + * + *
+     * Optional. Additional DNS domain names from the subnet's network to be made
+     * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+     * name of the target cluster's bootstrap address must be added to make it
+     * visible to the connector. For example:
+     * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+     * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The dnsDomainNames to set. + * @return This builder for chaining. + */ + public Builder setDnsDomainNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureDnsDomainNamesIsMutable(); + dnsDomainNames_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Additional DNS domain names from the subnet's network to be made
+     * visible to the Connect Cluster. When using MirrorMaker2, the DNS domain
+     * name of the target cluster's bootstrap address must be added to make it
+     * visible to the connector. For example:
+     * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+     * 
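+     *
+     * <p>A minimal usage sketch with hypothetical values, assuming the standard
+     * generated setter for the primary subnet on this same builder:
+     * <pre>{@code
+     * com.google.cloud.managedkafka.v1.ConnectNetworkConfig config =
+     *     com.google.cloud.managedkafka.v1.ConnectNetworkConfig.newBuilder()
+     *         .setPrimarySubnet(
+     *             "projects/my-project/regions/us-central1/subnetworks/my-subnet")
+     *         .addDnsDomainNames(
+     *             "my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog")
+     *         .build();
+     * }</pre>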
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The dnsDomainNames to add. + * @return This builder for chaining. + */ + public Builder addDnsDomainNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureDnsDomainNamesIsMutable(); + dnsDomainNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Additional DNS domain names from the subnet's network to be made
+     * visible to the Connect Cluster. When using MirrorMaker2, you must add the
+     * DNS domain name of the target cluster's bootstrap address to make it
+     * visible to the connector. For example:
+     * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+     * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The dnsDomainNames to add. + * @return This builder for chaining. + */ + public Builder addAllDnsDomainNames(java.lang.Iterable values) { + ensureDnsDomainNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, dnsDomainNames_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Additional DNS domain names from the subnet's network to be made
+     * visible to the Connect Cluster. When using MirrorMaker2, you must add the
+     * DNS domain name of the target cluster's bootstrap address to make it
+     * visible to the connector. For example:
+     * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+     * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDnsDomainNames() { + dnsDomainNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Additional DNS domain names from the subnet's network to be made
+     * visible to the Connect Cluster. When using MirrorMaker2, you must add the
+     * DNS domain name of the target cluster's bootstrap address to make it
+     * visible to the connector. For example:
+     * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+     * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the dnsDomainNames to add. + * @return This builder for chaining. + */ + public Builder addDnsDomainNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureDnsDomainNamesIsMutable(); + dnsDomainNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ConnectNetworkConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ConnectNetworkConfig) + private static final com.google.cloud.managedkafka.v1.ConnectNetworkConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ConnectNetworkConfig(); + } + + public static com.google.cloud.managedkafka.v1.ConnectNetworkConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ConnectNetworkConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectNetworkConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectNetworkConfigOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectNetworkConfigOrBuilder.java new file mode 100644 index 000000000000..7bfb2feb5270 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectNetworkConfigOrBuilder.java @@ -0,0 +1,201 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface ConnectNetworkConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ConnectNetworkConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. VPC subnet to make available to the Kafka Connect cluster.
+   * Structured like:
+   * projects/{project}/regions/{region}/subnetworks/{subnet_id}
+   *
+   * It is used to create a Private Service Connect (PSC) interface for the
+   * Kafka Connect workers. The subnet must be located in the same region as
+   * the Kafka Connect cluster.
+   *
+   * The CIDR range of the subnet must be within the IPv4 address ranges for
+   * private networks, as specified in RFC 1918. The primary subnet CIDR range
+   * must have a minimum size of /22 (1024 addresses).
+   * 
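+   *
+   * <p>A minimal read-side sketch; the default instance returns an empty
+   * string until the field is set:
+   * <pre>{@code
+   * com.google.cloud.managedkafka.v1.ConnectNetworkConfig config =
+   *     com.google.cloud.managedkafka.v1.ConnectNetworkConfig.getDefaultInstance();
+   * String primarySubnet = config.getPrimarySubnet(); // "" here
+   * }</pre>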
+ * + * string primary_subnet = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The primarySubnet. + */ + java.lang.String getPrimarySubnet(); + /** + * + * + *
+   * Required. VPC subnet to make available to the Kafka Connect cluster.
+   * Structured like:
+   * projects/{project}/regions/{region}/subnetworks/{subnet_id}
+   *
+   * It is used to create a Private Service Connect (PSC) interface for the
+   * Kafka Connect workers. The subnet must be located in the same region as
+   * the Kafka Connect cluster.
+   *
+   * The CIDR range of the subnet must be within the IPv4 address ranges for
+   * private networks, as specified in RFC 1918. The primary subnet CIDR range
+   * must have a minimum size of /22 (1024 addresses).
+   * 
+ * + * string primary_subnet = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for primarySubnet. + */ + com.google.protobuf.ByteString getPrimarySubnetBytes(); + + /** + * + * + *
+   * Optional. Additional subnets may be specified. They may be in another
+   * region, but must be in the same VPC network. The Connect workers can
+   * communicate with network endpoints in either the primary or additional
+   * subnets.
+   * 
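+   *
+   * <p>A minimal sketch with hypothetical values, assuming the standard
+   * generated repeated-field adder on the builder:
+   * <pre>{@code
+   * com.google.cloud.managedkafka.v1.ConnectNetworkConfig config =
+   *     com.google.cloud.managedkafka.v1.ConnectNetworkConfig.newBuilder()
+   *         .addAdditionalSubnets(
+   *             "projects/my-project/regions/us-east1/subnetworks/other-subnet")
+   *         .build();
+   * for (String subnet : config.getAdditionalSubnetsList()) {
+   *   System.out.println(subnet);
+   * }
+   * }</pre>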
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the additionalSubnets. + */ + java.util.List getAdditionalSubnetsList(); + /** + * + * + *
+   * Optional. Additional subnets may be specified. They may be in another
+   * region, but must be in the same VPC network. The Connect workers can
+   * communicate with network endpoints in either the primary or additional
+   * subnets.
+   * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of additionalSubnets. + */ + int getAdditionalSubnetsCount(); + /** + * + * + *
+   * Optional. Additional subnets may be specified. They may be in another
+   * region, but must be in the same VPC network. The Connect workers can
+   * communicate with network endpoints in either the primary or additional
+   * subnets.
+   * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The additionalSubnets at the given index. + */ + java.lang.String getAdditionalSubnets(int index); + /** + * + * + *
+   * Optional. Additional subnets may be specified. They may be in another
+   * region, but must be in the same VPC network. The Connect workers can
+   * communicate with network endpoints in either the primary or additional
+   * subnets.
+   * 
+ * + * repeated string additional_subnets = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the additionalSubnets at the given index. + */ + com.google.protobuf.ByteString getAdditionalSubnetsBytes(int index); + + /** + * + * + *
+   * Optional. Additional DNS domain names from the subnet's network to be made
+   * visible to the Connect Cluster. When using MirrorMaker2, you must add the
+   * DNS domain name of the target cluster's bootstrap address to make it
+   * visible to the connector. For example:
+   * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+   * 
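+   *
+   * <p>A minimal sketch with a hypothetical target-cluster bootstrap name:
+   * <pre>{@code
+   * com.google.cloud.managedkafka.v1.ConnectNetworkConfig config =
+   *     com.google.cloud.managedkafka.v1.ConnectNetworkConfig.getDefaultInstance();
+   * boolean visible = config.getDnsDomainNamesList().contains(
+   *     "my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog");
+   * }</pre>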
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the dnsDomainNames. + */ + java.util.List getDnsDomainNamesList(); + /** + * + * + *
+   * Optional. Additional DNS domain names from the subnet's network to be made
+   * visible to the Connect Cluster. When using MirrorMaker2, you must add the
+   * DNS domain name of the target cluster's bootstrap address to make it
+   * visible to the connector. For example:
+   * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+   * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of dnsDomainNames. + */ + int getDnsDomainNamesCount(); + /** + * + * + *
+   * Optional. Additional DNS domain names from the subnet's network to be made
+   * visible to the Connect Cluster. When using MirrorMaker2, you must add the
+   * DNS domain name of the target cluster's bootstrap address to make it
+   * visible to the connector. For example:
+   * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+   * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The dnsDomainNames at the given index. + */ + java.lang.String getDnsDomainNames(int index); + /** + * + * + *
+   * Optional. Additional DNS domain names from the subnet's network to be made
+   * visible to the Connect Cluster. When using MirrorMaker2, you must add the
+   * DNS domain name of the target cluster's bootstrap address to make it
+   * visible to the connector. For example:
+   * my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+   * 
+ * + * repeated string dns_domain_names = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the dnsDomainNames at the given index. + */ + com.google.protobuf.ByteString getDnsDomainNamesBytes(int index); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/Connector.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/Connector.java new file mode 100644 index 000000000000..fb9490490f08 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/Connector.java @@ -0,0 +1,1806 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * A Kafka Connect connector in a given ConnectCluster.
+ * 
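+ *
+ * <p>A minimal construction sketch with hypothetical values; the state field
+ * is output only and is therefore not set here:
+ * <pre>{@code
+ * com.google.cloud.managedkafka.v1.Connector connector =
+ *     com.google.cloud.managedkafka.v1.Connector.newBuilder()
+ *         .setName(
+ *             "projects/my-project/locations/us-central1/connectClusters"
+ *                 + "/my-connect-cluster/connectors/my-connector")
+ *         .putConfigs("connector.class",
+ *             "org.apache.kafka.connect.mirror.MirrorSourceConnector")
+ *         .putConfigs("tasks.max", "3")
+ *         .build();
+ * }</pre>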
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.Connector} + */ +public final class Connector extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.Connector) + ConnectorOrBuilder { + private static final long serialVersionUID = 0L; + // Use Connector.newBuilder() to construct. + private Connector(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Connector() { + name_ = ""; + state_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new Connector(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_Connector_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 2: + return internalGetConfigs(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_Connector_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.Connector.class, + com.google.cloud.managedkafka.v1.Connector.Builder.class); + } + + /** + * + * + *
+   * The state of the connector.
+   * 
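+   *
+   * <p>A minimal sketch of branching on the reported state:
+   * <pre>{@code
+   * com.google.cloud.managedkafka.v1.Connector connector =
+   *     com.google.cloud.managedkafka.v1.Connector.getDefaultInstance();
+   * switch (connector.getState()) {
+   *   case RUNNING:
+   *     // healthy; nothing to do
+   *     break;
+   *   case FAILED:
+   *     // inspect the logs, then consider restarting
+   *     break;
+   *   default:
+   *     break;
+   * }
+   * }</pre>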
+ * + * Protobuf enum {@code google.cloud.managedkafka.v1.Connector.State} + */ + public enum State implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * A state was not specified.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + * + * + *
+     * The connector is not assigned to any tasks; this state is usually transient.
+     * 
+ * + * UNASSIGNED = 1; + */ + UNASSIGNED(1), + /** + * + * + *
+     * The connector is running.
+     * 
+ * + * RUNNING = 2; + */ + RUNNING(2), + /** + * + * + *
+     * The connector has been paused.
+     * 
+ * + * PAUSED = 3; + */ + PAUSED(3), + /** + * + * + *
+     * The connector has failed. See the logs for details.
+     * 
+ * + * FAILED = 4; + */ + FAILED(4), + /** + * + * + *
+     * The connector is restarting.
+     * 
+ * + * RESTARTING = 5; + */ + RESTARTING(5), + /** + * + * + *
+     * The connector has been stopped.
+     * 
+ * + * STOPPED = 6; + */ + STOPPED(6), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * A state was not specified.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * The connector is not assigned to any tasks; this state is usually transient.
+     * 
+ * + * UNASSIGNED = 1; + */ + public static final int UNASSIGNED_VALUE = 1; + /** + * + * + *
+     * The connector is running.
+     * 
+ * + * RUNNING = 2; + */ + public static final int RUNNING_VALUE = 2; + /** + * + * + *
+     * The connector has been paused.
+     * 
+ * + * PAUSED = 3; + */ + public static final int PAUSED_VALUE = 3; + /** + * + * + *
+     * The connector has failed. See the logs for details.
+     * 
+ * + * FAILED = 4; + */ + public static final int FAILED_VALUE = 4; + /** + * + * + *
+     * The connector is restarting.
+     * 
+ * + * RESTARTING = 5; + */ + public static final int RESTARTING_VALUE = 5; + /** + * + * + *
+     * The connector has been stopped.
+     * 
+ * + * STOPPED = 6; + */ + public static final int STOPPED_VALUE = 6; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static State forNumber(int value) { + switch (value) { + case 0: + return STATE_UNSPECIFIED; + case 1: + return UNASSIGNED; + case 2: + return RUNNING; + case 3: + return PAUSED; + case 4: + return FAILED; + case 5: + return RESTARTING; + case 6: + return STOPPED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.Connector.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.managedkafka.v1.Connector.State) + } + + private int restartPolicyCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object restartPolicy_; + + public enum RestartPolicyCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + TASK_RESTART_POLICY(4), + RESTARTPOLICY_NOT_SET(0); + private final int value; + + private RestartPolicyCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static RestartPolicyCase valueOf(int value) { + return forNumber(value); + } + + public static RestartPolicyCase forNumber(int value) { + switch (value) { + case 4: + return TASK_RESTART_POLICY; + case 0: + return RESTARTPOLICY_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RestartPolicyCase getRestartPolicyCase() { + return RestartPolicyCase.forNumber(restartPolicyCase_); + } + + public static final int TASK_RESTART_POLICY_FIELD_NUMBER = 4; + /** + * + * + *
+   * Optional. Restarts the individual tasks of a Connector.
+   * 
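+   *
+   * <p>A minimal sketch that sets the oneof with an empty policy message; the
+   * TaskRetryPolicy fields are deliberately left unset rather than guessed:
+   * <pre>{@code
+   * com.google.cloud.managedkafka.v1.Connector connector =
+   *     com.google.cloud.managedkafka.v1.Connector.newBuilder()
+   *         .setTaskRestartPolicy(
+   *             com.google.cloud.managedkafka.v1.TaskRetryPolicy.newBuilder().build())
+   *         .build();
+   * boolean isSet = connector.hasTaskRestartPolicy(); // true
+   * }</pre>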
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the taskRestartPolicy field is set. + */ + @java.lang.Override + public boolean hasTaskRestartPolicy() { + return restartPolicyCase_ == 4; + } + /** + * + * + *
+   * Optional. Restarts the individual tasks of a Connector.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The taskRestartPolicy. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.TaskRetryPolicy getTaskRestartPolicy() { + if (restartPolicyCase_ == 4) { + return (com.google.cloud.managedkafka.v1.TaskRetryPolicy) restartPolicy_; + } + return com.google.cloud.managedkafka.v1.TaskRetryPolicy.getDefaultInstance(); + } + /** + * + * + *
+   * Optional. Restarts the individual tasks of a Connector.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.TaskRetryPolicyOrBuilder getTaskRestartPolicyOrBuilder() { + if (restartPolicyCase_ == 4) { + return (com.google.cloud.managedkafka.v1.TaskRetryPolicy) restartPolicy_; + } + return com.google.cloud.managedkafka.v1.TaskRetryPolicy.getDefaultInstance(); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
+   * Identifier. The name of the connector.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+   * 
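+   *
+   * <p>A minimal sketch, assuming the standard generated resource-name helper
+   * (ConnectorName, added in the same change) and the usual
+   * project/location/cluster/connector argument order:
+   * <pre>{@code
+   * String name =
+   *     com.google.cloud.managedkafka.v1.ConnectorName.of(
+   *             "my-project", "us-central1", "my-connect-cluster", "my-connector")
+   *         .toString();
+   * }</pre>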
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Identifier. The name of the connector.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CONFIGS_FIELD_NUMBER = 2; + + private static final class ConfigsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_Connector_ConfigsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField configs_; + + private com.google.protobuf.MapField internalGetConfigs() { + if (configs_ == null) { + return com.google.protobuf.MapField.emptyMapField(ConfigsDefaultEntryHolder.defaultEntry); + } + return configs_; + } + + public int getConfigsCount() { + return internalGetConfigs().getMap().size(); + } + /** + * + * + *
+   * Optional. Connector config as keys/values.
+   * The keys of the map are connector property names, for example:
+   * `connector.class`, `tasks.max`, `key.converter`.
+   * 
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public boolean containsConfigs(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetConfigs().getMap().containsKey(key); + } + /** Use {@link #getConfigsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getConfigs() { + return getConfigsMap(); + } + /** + * + * + *
+   * Optional. Connector config as keys/values.
+   * The keys of the map are connector property names, for example:
+   * `connector.class`, `tasks.max`, `key.converter`.
+   * 
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.util.Map getConfigsMap() { + return internalGetConfigs().getMap(); + } + /** + * + * + *
+   * Optional. Connector config as keys/values.
+   * The keys of the map are connector property names, for example:
+   * `connector.class`, `tasks.max`, `key.converter`.
+   * 
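+   *
+   * <p>A minimal sketch of a lookup with a fallback value:
+   * <pre>{@code
+   * com.google.cloud.managedkafka.v1.Connector connector =
+   *     com.google.cloud.managedkafka.v1.Connector.getDefaultInstance();
+   * String tasksMax = connector.getConfigsOrDefault("tasks.max", "1"); // "1" here
+   * }</pre>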
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public /* nullable */ java.lang.String getConfigsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetConfigs().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+   * Optional. Connector config as keys/values.
+   * The keys of the map are connector property names, for example:
+   * `connector.class`, `tasks.max`, `key.converter`.
+   * 
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getConfigsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetConfigs().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int STATE_FIELD_NUMBER = 3; + private int state_ = 0; + /** + * + * + *
+   * Output only. The current state of the connector.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.Connector.State state = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + /** + * + * + *
+   * Output only. The current state of the connector.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.Connector.State state = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.Connector.State getState() { + com.google.cloud.managedkafka.v1.Connector.State result = + com.google.cloud.managedkafka.v1.Connector.State.forNumber(state_); + return result == null ? com.google.cloud.managedkafka.v1.Connector.State.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetConfigs(), ConfigsDefaultEntryHolder.defaultEntry, 2); + if (state_ != com.google.cloud.managedkafka.v1.Connector.State.STATE_UNSPECIFIED.getNumber()) { + output.writeEnum(3, state_); + } + if (restartPolicyCase_ == 4) { + output.writeMessage(4, (com.google.cloud.managedkafka.v1.TaskRetryPolicy) restartPolicy_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + for (java.util.Map.Entry entry : + internalGetConfigs().getMap().entrySet()) { + com.google.protobuf.MapEntry configs__ = + ConfigsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, configs__); + } + if (state_ != com.google.cloud.managedkafka.v1.Connector.State.STATE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, state_); + } + if (restartPolicyCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.managedkafka.v1.TaskRetryPolicy) restartPolicy_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.Connector)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.Connector other = + (com.google.cloud.managedkafka.v1.Connector) obj; + + if (!getName().equals(other.getName())) return false; + if (!internalGetConfigs().equals(other.internalGetConfigs())) return false; + if (state_ != other.state_) return false; + if (!getRestartPolicyCase().equals(other.getRestartPolicyCase())) return false; + switch (restartPolicyCase_) { + case 4: + if (!getTaskRestartPolicy().equals(other.getTaskRestartPolicy())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + 
getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (!internalGetConfigs().getMap().isEmpty()) { + hash = (37 * hash) + CONFIGS_FIELD_NUMBER; + hash = (53 * hash) + internalGetConfigs().hashCode(); + } + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + switch (restartPolicyCase_) { + case 4: + hash = (37 * hash) + TASK_RESTART_POLICY_FIELD_NUMBER; + hash = (53 * hash) + getTaskRestartPolicy().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.Connector parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.Connector parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.Connector parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.managedkafka.v1.Connector prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A Kafka Connect connector in a given ConnectCluster.
+   * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.Connector} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.Connector) + com.google.cloud.managedkafka.v1.ConnectorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_Connector_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 2: + return internalGetConfigs(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 2: + return internalGetMutableConfigs(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_Connector_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.Connector.class, + com.google.cloud.managedkafka.v1.Connector.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.Connector.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (taskRestartPolicyBuilder_ != null) { + taskRestartPolicyBuilder_.clear(); + } + name_ = ""; + internalGetMutableConfigs().clear(); + state_ = 0; + restartPolicyCase_ = 0; + restartPolicy_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_Connector_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.Connector getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.Connector.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.Connector build() { + com.google.cloud.managedkafka.v1.Connector result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.Connector buildPartial() { + com.google.cloud.managedkafka.v1.Connector result = + new com.google.cloud.managedkafka.v1.Connector(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.Connector result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.configs_ = internalGetConfigs(); + result.configs_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.state_ = state_; + } + } + + private void buildPartialOneofs(com.google.cloud.managedkafka.v1.Connector result) 
{ + result.restartPolicyCase_ = restartPolicyCase_; + result.restartPolicy_ = this.restartPolicy_; + if (restartPolicyCase_ == 4 && taskRestartPolicyBuilder_ != null) { + result.restartPolicy_ = taskRestartPolicyBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.Connector) { + return mergeFrom((com.google.cloud.managedkafka.v1.Connector) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.Connector other) { + if (other == com.google.cloud.managedkafka.v1.Connector.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000002; + onChanged(); + } + internalGetMutableConfigs().mergeFrom(other.internalGetConfigs()); + bitField0_ |= 0x00000004; + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + switch (other.getRestartPolicyCase()) { + case TASK_RESTART_POLICY: + { + mergeTaskRestartPolicy(other.getTaskRestartPolicy()); + break; + } + case RESTARTPOLICY_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 10 + case 18: + { + com.google.protobuf.MapEntry configs__ = + input.readMessage( + ConfigsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableConfigs() + .getMutableMap() + .put(configs__.getKey(), configs__.getValue()); + bitField0_ |= 0x00000004; + break; + } // case 18 + case 24: + { + state_ = input.readEnum(); + bitField0_ |= 0x00000008; + break; + } // case 24 + case 34: + { + input.readMessage( + getTaskRestartPolicyFieldBuilder().getBuilder(), extensionRegistry); + restartPolicyCase_ = 4; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while 
(!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int restartPolicyCase_ = 0; + private java.lang.Object restartPolicy_; + + public RestartPolicyCase getRestartPolicyCase() { + return RestartPolicyCase.forNumber(restartPolicyCase_); + } + + public Builder clearRestartPolicy() { + restartPolicyCase_ = 0; + restartPolicy_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.TaskRetryPolicy, + com.google.cloud.managedkafka.v1.TaskRetryPolicy.Builder, + com.google.cloud.managedkafka.v1.TaskRetryPolicyOrBuilder> + taskRestartPolicyBuilder_; + /** + * + * + *
+     * Optional. Restarts the individual tasks of a Connector.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the taskRestartPolicy field is set. + */ + @java.lang.Override + public boolean hasTaskRestartPolicy() { + return restartPolicyCase_ == 4; + } + /** + * + * + *
+     * Optional. Restarts the individual tasks of a Connector.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The taskRestartPolicy. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.TaskRetryPolicy getTaskRestartPolicy() { + if (taskRestartPolicyBuilder_ == null) { + if (restartPolicyCase_ == 4) { + return (com.google.cloud.managedkafka.v1.TaskRetryPolicy) restartPolicy_; + } + return com.google.cloud.managedkafka.v1.TaskRetryPolicy.getDefaultInstance(); + } else { + if (restartPolicyCase_ == 4) { + return taskRestartPolicyBuilder_.getMessage(); + } + return com.google.cloud.managedkafka.v1.TaskRetryPolicy.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. Restarts the individual tasks of a Connector.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTaskRestartPolicy(com.google.cloud.managedkafka.v1.TaskRetryPolicy value) { + if (taskRestartPolicyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + restartPolicy_ = value; + onChanged(); + } else { + taskRestartPolicyBuilder_.setMessage(value); + } + restartPolicyCase_ = 4; + return this; + } + /** + * + * + *
+     * Optional. Restarts the individual tasks of a Connector.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTaskRestartPolicy( + com.google.cloud.managedkafka.v1.TaskRetryPolicy.Builder builderForValue) { + if (taskRestartPolicyBuilder_ == null) { + restartPolicy_ = builderForValue.build(); + onChanged(); + } else { + taskRestartPolicyBuilder_.setMessage(builderForValue.build()); + } + restartPolicyCase_ = 4; + return this; + } + /** + * + * + *
+     * Optional. Restarts the individual tasks of a Connector.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeTaskRestartPolicy(com.google.cloud.managedkafka.v1.TaskRetryPolicy value) { + if (taskRestartPolicyBuilder_ == null) { + if (restartPolicyCase_ == 4 + && restartPolicy_ + != com.google.cloud.managedkafka.v1.TaskRetryPolicy.getDefaultInstance()) { + restartPolicy_ = + com.google.cloud.managedkafka.v1.TaskRetryPolicy.newBuilder( + (com.google.cloud.managedkafka.v1.TaskRetryPolicy) restartPolicy_) + .mergeFrom(value) + .buildPartial(); + } else { + restartPolicy_ = value; + } + onChanged(); + } else { + if (restartPolicyCase_ == 4) { + taskRestartPolicyBuilder_.mergeFrom(value); + } else { + taskRestartPolicyBuilder_.setMessage(value); + } + } + restartPolicyCase_ = 4; + return this; + } + /** + * + * + *
+     * Optional. Restarts the individual tasks of a Connector.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearTaskRestartPolicy() { + if (taskRestartPolicyBuilder_ == null) { + if (restartPolicyCase_ == 4) { + restartPolicyCase_ = 0; + restartPolicy_ = null; + onChanged(); + } + } else { + if (restartPolicyCase_ == 4) { + restartPolicyCase_ = 0; + restartPolicy_ = null; + } + taskRestartPolicyBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Optional. Restarts the individual tasks of a Connector.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.managedkafka.v1.TaskRetryPolicy.Builder getTaskRestartPolicyBuilder() { + return getTaskRestartPolicyFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Restarts the individual tasks of a Connector.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.TaskRetryPolicyOrBuilder + getTaskRestartPolicyOrBuilder() { + if ((restartPolicyCase_ == 4) && (taskRestartPolicyBuilder_ != null)) { + return taskRestartPolicyBuilder_.getMessageOrBuilder(); + } else { + if (restartPolicyCase_ == 4) { + return (com.google.cloud.managedkafka.v1.TaskRetryPolicy) restartPolicy_; + } + return com.google.cloud.managedkafka.v1.TaskRetryPolicy.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. Restarts the individual tasks of a Connector.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.TaskRetryPolicy, + com.google.cloud.managedkafka.v1.TaskRetryPolicy.Builder, + com.google.cloud.managedkafka.v1.TaskRetryPolicyOrBuilder> + getTaskRestartPolicyFieldBuilder() { + if (taskRestartPolicyBuilder_ == null) { + if (!(restartPolicyCase_ == 4)) { + restartPolicy_ = com.google.cloud.managedkafka.v1.TaskRetryPolicy.getDefaultInstance(); + } + taskRestartPolicyBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.TaskRetryPolicy, + com.google.cloud.managedkafka.v1.TaskRetryPolicy.Builder, + com.google.cloud.managedkafka.v1.TaskRetryPolicyOrBuilder>( + (com.google.cloud.managedkafka.v1.TaskRetryPolicy) restartPolicy_, + getParentForChildren(), + isClean()); + restartPolicy_ = null; + } + restartPolicyCase_ = 4; + onChanged(); + return taskRestartPolicyBuilder_; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Identifier. The name of the connector.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Identifier. The name of the connector.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Identifier. The name of the connector.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Identifier. The name of the connector.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * Identifier. The name of the connector.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.MapField configs_; + + private com.google.protobuf.MapField internalGetConfigs() { + if (configs_ == null) { + return com.google.protobuf.MapField.emptyMapField(ConfigsDefaultEntryHolder.defaultEntry); + } + return configs_; + } + + private com.google.protobuf.MapField + internalGetMutableConfigs() { + if (configs_ == null) { + configs_ = com.google.protobuf.MapField.newMapField(ConfigsDefaultEntryHolder.defaultEntry); + } + if (!configs_.isMutable()) { + configs_ = configs_.copy(); + } + bitField0_ |= 0x00000004; + onChanged(); + return configs_; + } + + public int getConfigsCount() { + return internalGetConfigs().getMap().size(); + } + /** + * + * + *
+     * Optional. Connector config as keys/values.
+     * The keys of the map are connector property names, for example:
+     * `connector.class`, `tasks.max`, `key.converter`.
+     * 
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public boolean containsConfigs(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetConfigs().getMap().containsKey(key); + } + /** Use {@link #getConfigsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getConfigs() { + return getConfigsMap(); + } + /** + * + * + *
+     * Optional. Connector config as keys/values.
+     * The keys of the map are connector property names, for example:
+     * `connector.class`, `tasks.max`, `key.converter`.
+     * 
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.util.Map getConfigsMap() { + return internalGetConfigs().getMap(); + } + /** + * + * + *
+     * Optional. Connector config as keys/values.
+     * The keys of the map are connector property names, for example:
+     * `connector.class`, `tasks.max`, `key.converter`.
+     * 
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public /* nullable */ java.lang.String getConfigsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetConfigs().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+     * Optional. Connector config as keys/values.
+     * The keys of the map are connector property names, for example:
+     * `connector.class`, `tasks.max`, `key.converter`.
+     * 
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public java.lang.String getConfigsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetConfigs().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearConfigs() { + bitField0_ = (bitField0_ & ~0x00000004); + internalGetMutableConfigs().getMutableMap().clear(); + return this; + } + /** + * + * + *
+     * Optional. Connector config as keys/values.
+     * The keys of the map are connector property names, for example:
+     * `connector.class`, `tasks.max`, `key.converter`.
+     * 
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder removeConfigs(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableConfigs().getMutableMap().remove(key); + return this; + } + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableConfigs() { + bitField0_ |= 0x00000004; + return internalGetMutableConfigs().getMutableMap(); + } + /** + * + * + *
+     * Optional. Connector config as keys/values.
+     * The keys of the map are connector property names, for example:
+     * `connector.class`, `tasks.max`, `key.converter`.
+     * 
+ * + * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder putConfigs(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableConfigs().getMutableMap().put(key, value); + bitField0_ |= 0x00000004; + return this; + } + /** + * + * + *
+     * Optional. Connector config as keys/values.
+     * The keys of the map are connector property names, for example:
+     * `connector.class`, `tasks.max`, `key.converter`.
+     * 
+     *
+     * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL];
+     */
+    public Builder putAllConfigs(
+        java.util.Map<java.lang.String, java.lang.String> values) {
+      internalGetMutableConfigs().getMutableMap().putAll(values);
+      bitField0_ |= 0x00000004;
+      return this;
+    }
+
+    private int state_ = 0;
+    /**
+     *
+     *
+     *
+     * Output only. The current state of the connector.
+     * 
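+     *
+     * A hedged usage sketch: this field is output only, so clients read it
+     * rather than set it, for example:
+     * <pre>{@code
+     * if (connector.getState() == Connector.State.UNRECOGNIZED) {
+     *   // Server returned a state value this client version does not know about.
+     * }
+     * }</pre>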
+     *
+     *
+     * .google.cloud.managedkafka.v1.Connector.State state = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     *
+     * @return The enum numeric value on the wire for state.
+     */
+    @java.lang.Override
+    public int getStateValue() {
+      return state_;
+    }
+    /**
+     *
+     *
+     *
+     * Output only. The current state of the connector.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.Connector.State state = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     *
+     * @param value The enum numeric value on the wire for state to set.
+     * @return This builder for chaining.
+     */
+    public Builder setStateValue(int value) {
+      state_ = value;
+      bitField0_ |= 0x00000008;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Output only. The current state of the connector.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.Connector.State state = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     *
+     * @return The state.
+     */
+    @java.lang.Override
+    public com.google.cloud.managedkafka.v1.Connector.State getState() {
+      com.google.cloud.managedkafka.v1.Connector.State result =
+          com.google.cloud.managedkafka.v1.Connector.State.forNumber(state_);
+      return result == null
+          ? com.google.cloud.managedkafka.v1.Connector.State.UNRECOGNIZED
+          : result;
+    }
+    /**
+     *
+     *
+     *
+     * Output only. The current state of the connector.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.Connector.State state = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     *
+     * @param value The state to set.
+     * @return This builder for chaining.
+     */
+    public Builder setState(com.google.cloud.managedkafka.v1.Connector.State value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      bitField0_ |= 0x00000008;
+      state_ = value.getNumber();
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Output only. The current state of the connector.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.Connector.State state = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+     *
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearState() {
+      bitField0_ = (bitField0_ & ~0x00000008);
+      state_ = 0;
+      onChanged();
+      return this;
+    }
+
+    @java.lang.Override
+    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFields(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.Connector)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.Connector)
+  private static final com.google.cloud.managedkafka.v1.Connector DEFAULT_INSTANCE;
+
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.Connector();
+  }
+
+  public static com.google.cloud.managedkafka.v1.Connector getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<Connector> PARSER =
+      new com.google.protobuf.AbstractParser<Connector>() {
+        @java.lang.Override
+        public Connector parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          Builder builder = newBuilder();
+          try {
+            builder.mergeFrom(input, extensionRegistry);
+          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+            throw e.setUnfinishedMessage(builder.buildPartial());
+          } catch (com.google.protobuf.UninitializedMessageException e) {
+            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
+          } catch (java.io.IOException e) {
+            throw new com.google.protobuf.InvalidProtocolBufferException(e)
+                .setUnfinishedMessage(builder.buildPartial());
+          }
+          return builder.buildPartial();
+        }
+      };
+
+  public static com.google.protobuf.Parser<Connector> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<Connector> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.managedkafka.v1.Connector getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+}
diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectorName.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectorName.java
new file mode 100644
index 000000000000..e5ee4457f5cf
--- /dev/null
+++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectorName.java
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.managedkafka.v1;
+
+import com.google.api.pathtemplate.PathTemplate;
+import com.google.api.resourcenames.ResourceName;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS.
+@Generated("by gapic-generator-java")
+public class ConnectorName implements ResourceName {
+  private static final PathTemplate PROJECT_LOCATION_CONNECT_CLUSTER_CONNECTOR =
+      PathTemplate.createWithoutUrlEncoding(
+          "projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}");
+  private volatile Map<String, String> fieldValuesMap;
+  private final String project;
+  private final String location;
+  private final String connectCluster;
+  private final String connector;
+
+  @Deprecated
+  protected ConnectorName() {
+    project = null;
+    location = null;
+    connectCluster = null;
+    connector = null;
+  }
+
+  private ConnectorName(Builder builder) {
+    project = Preconditions.checkNotNull(builder.getProject());
+    location = Preconditions.checkNotNull(builder.getLocation());
+    connectCluster = Preconditions.checkNotNull(builder.getConnectCluster());
+    connector = Preconditions.checkNotNull(builder.getConnector());
+  }
+
+  public String getProject() {
+    return project;
+  }
+
+  public String getLocation() {
+    return location;
+  }
+
+  public String getConnectCluster() {
+    return connectCluster;
+  }
+
+  public String getConnector() {
+    return connector;
+  }
+
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  public Builder toBuilder() {
+    return new Builder(this);
+  }
+
+  public static ConnectorName of(
+      String project, String location, String connectCluster, String connector) {
+    return newBuilder()
+        .setProject(project)
+        .setLocation(location)
+        .setConnectCluster(connectCluster)
+        .setConnector(connector)
+        .build();
+  }
+
+  public static String format(
+      String project, String location, String connectCluster, String connector) {
+    return newBuilder()
+        .setProject(project)
+        .setLocation(location)
+        .setConnectCluster(connectCluster)
+        .setConnector(connector)
+        .build()
+        .toString();
+  }
+
+  public static ConnectorName parse(String formattedString) {
+    if (formattedString.isEmpty()) {
+      return null;
+    }
+    Map<String, String> matchMap =
+        PROJECT_LOCATION_CONNECT_CLUSTER_CONNECTOR.validatedMatch(
+            formattedString, "ConnectorName.parse: formattedString not in valid format");
+    return of(
+        matchMap.get("project"),
+        matchMap.get("location"),
+        matchMap.get("connect_cluster"),
+        matchMap.get("connector"));
+  }
+
+  public static List<ConnectorName> parseList(List<String> formattedStrings) {
+    List<ConnectorName> list = new ArrayList<>(formattedStrings.size());
+    for (String formattedString : formattedStrings) {
+      list.add(parse(formattedString));
+    }
+    return list;
+  }
+
+  public static List<String> toStringList(List<ConnectorName> values) {
+    List<String> list = new ArrayList<>(values.size());
+    for (ConnectorName value : values) {
+      if (value == null) {
+        list.add("");
+      } else {
+        list.add(value.toString());
+      }
+    }
+    return list;
+  }
+
+  public static boolean isParsableFrom(String formattedString) {
+    return PROJECT_LOCATION_CONNECT_CLUSTER_CONNECTOR.matches(formattedString);
+  }
+
+  @Override
+  public Map<String, String> getFieldValuesMap() {
+    if (fieldValuesMap == null) {
+      synchronized (this) {
+        if (fieldValuesMap == null) {
+          ImmutableMap.Builder<String, String> fieldMapBuilder = ImmutableMap.builder();
+          if (project != null) {
+            fieldMapBuilder.put("project", project);
+          }
+          if (location != null) {
+            fieldMapBuilder.put("location", location);
+          }
+          if (connectCluster != null) {
+            fieldMapBuilder.put("connect_cluster", connectCluster);
+          }
+          if (connector != null) {
+            fieldMapBuilder.put("connector", connector);
+          }
+          fieldValuesMap = fieldMapBuilder.build();
+        }
+      }
+    }
+    return fieldValuesMap;
+  }
+
+  public String getFieldValue(String fieldName) {
+    return getFieldValuesMap().get(fieldName);
+  }
+
+  @Override
+  public String toString() {
+    return PROJECT_LOCATION_CONNECT_CLUSTER_CONNECTOR.instantiate(
+        "project",
+        project,
+        "location",
+        location,
+        "connect_cluster",
+        connectCluster,
+        "connector",
+        connector);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) {
+      return true;
+    }
+    if (o != null && getClass() == o.getClass()) {
+      ConnectorName that = ((ConnectorName) o);
+      return Objects.equals(this.project, that.project)
+          && Objects.equals(this.location, that.location)
+          && Objects.equals(this.connectCluster, that.connectCluster)
+          && Objects.equals(this.connector, that.connector);
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    int h = 1;
+    h *= 1000003;
+    h ^= Objects.hashCode(project);
+    h *= 1000003;
+    h ^= Objects.hashCode(location);
+    h *= 1000003;
+    h ^= Objects.hashCode(connectCluster);
+    h *= 1000003;
+    h ^= Objects.hashCode(connector);
+    return h;
+  }
+
+  /**
+   * Builder for
+   * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}.
+   */
+  public static class Builder {
+    private String project;
+    private String location;
+    private String connectCluster;
+    private String connector;
+
+    protected Builder() {}
+
+    public String getProject() {
+      return project;
+    }
+
+    public String getLocation() {
+      return location;
+    }
+
+    public String getConnectCluster() {
+      return connectCluster;
+    }
+
+    public String getConnector() {
+      return connector;
+    }
+
+    public Builder setProject(String project) {
+      this.project = project;
+      return this;
+    }
+
+    public Builder setLocation(String location) {
+      this.location = location;
+      return this;
+    }
+
+    public Builder setConnectCluster(String connectCluster) {
+      this.connectCluster = connectCluster;
+      return this;
+    }
+
+    public Builder setConnector(String connector) {
+      this.connector = connector;
+      return this;
+    }
+
+    private Builder(ConnectorName connectorName) {
+      this.project = connectorName.project;
+      this.location = connectorName.location;
+      this.connectCluster = connectorName.connectCluster;
+      this.connector = connectorName.connector;
+    }
+
+    public ConnectorName build() {
+      return new ConnectorName(this);
+    }
+  }
+}
diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectorOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectorOrBuilder.java
new file mode 100644
index 000000000000..2844ef7bdb0d
--- /dev/null
+++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ConnectorOrBuilder.java
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/managedkafka/v1/resources.proto
+
+// Protobuf Java Version: 3.25.5
+package com.google.cloud.managedkafka.v1;
+
+public interface ConnectorOrBuilder
+    extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.Connector)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   *
+   *
+   *
+   * Optional. Restarts the individual tasks of a Connector.
+   * 
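+   *
+   * A hedged usage sketch: {@code if (connector.hasTaskRestartPolicy()) { ... }}
+   * is the usual way to check this optional field before reading it.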
+   *
+   *
+   * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   *
+   * @return Whether the taskRestartPolicy field is set.
+   */
+  boolean hasTaskRestartPolicy();
+  /**
+   *
+   *
+   *
+   * Optional. Restarts the individual tasks of a Connector.
+   * 
+   *
+   *
+   * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   *
+   * @return The taskRestartPolicy.
+   */
+  com.google.cloud.managedkafka.v1.TaskRetryPolicy getTaskRestartPolicy();
+  /**
+   *
+   *
+   *
+   * Optional. Restarts the individual tasks of a Connector.
+   * 
+   *
+   *
+   * .google.cloud.managedkafka.v1.TaskRetryPolicy task_restart_policy = 4 [(.google.api.field_behavior) = OPTIONAL];
+   *
+   */
+  com.google.cloud.managedkafka.v1.TaskRetryPolicyOrBuilder getTaskRestartPolicyOrBuilder();
+
+  /**
+   *
+   *
+   *
+   * Identifier. The name of the connector.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+   * 
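+   *
+   * A hypothetical sketch of building such a name with the generated
+   * ConnectorName helper (all identifiers below are placeholder values):
+   * <pre>{@code
+   * String name =
+   *     ConnectorName.of("my-project", "us-central1", "my-connect-cluster", "my-connector")
+   *         .toString();
+   * }</pre>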
+   *
+   * string name = 1 [(.google.api.field_behavior) = IDENTIFIER];
+   *
+   * @return The name.
+   */
+  java.lang.String getName();
+  /**
+   *
+   *
+   *
+   * Identifier. The name of the connector.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+   * 
+   *
+   * string name = 1 [(.google.api.field_behavior) = IDENTIFIER];
+   *
+   * @return The bytes for name.
+   */
+  com.google.protobuf.ByteString getNameBytes();
+
+  /**
+   *
+   *
+   *
+   * Optional. Connector config as keys/values.
+   * The keys of the map are connector property names, for example:
+   * `connector.class`, `tasks.max`, `key.converter`.
+   * 
+   *
+   * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  int getConfigsCount();
+  /**
+   *
+   *
+   *
+   * Optional. Connector config as keys/values.
+   * The keys of the map are connector property names, for example:
+   * `connector.class`, `tasks.max`, `key.converter`.
+   * 
+   *
+   * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  boolean containsConfigs(java.lang.String key);
+  /** Use {@link #getConfigsMap()} instead. */
+  @java.lang.Deprecated
+  java.util.Map<java.lang.String, java.lang.String> getConfigs();
+  /**
+   *
+   *
+   *
+   * Optional. Connector config as keys/values.
+   * The keys of the map are connector property names, for example:
+   * `connector.class`, `tasks.max`, `key.converter`.
+   * 
+   *
+   * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  java.util.Map<java.lang.String, java.lang.String> getConfigsMap();
+  /**
+   *
+   *
+   *
+   * Optional. Connector config as keys/values.
+   * The keys of the map are connector property names, for example:
+   * `connector.class`, `tasks.max`, `key.converter`.
+   * 
+   *
+   * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  /* nullable */
+  java.lang.String getConfigsOrDefault(
+      java.lang.String key,
+      /* nullable */
+      java.lang.String defaultValue);
+  /**
+   *
+   *
+   *
+   * Optional. Connector config as keys/values.
+   * The keys of the map are connector property names, for example:
+   * `connector.class`, `tasks.max`, `key.converter`.
+   * 
+   *
+   * map<string, string> configs = 2 [(.google.api.field_behavior) = OPTIONAL];
+   */
+  java.lang.String getConfigsOrThrow(java.lang.String key);
+
+  /**
+   *
+   *
+   *
+   * Output only. The current state of the connector.
+   * 
+   *
+   *
+   * .google.cloud.managedkafka.v1.Connector.State state = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+   *
+   *
+   * @return The enum numeric value on the wire for state.
+   */
+  int getStateValue();
+  /**
+   *
+   *
+   *
+   * Output only. The current state of the connector.
+   * 
+   *
+   *
+   * .google.cloud.managedkafka.v1.Connector.State state = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
+   *
+   *
+   * @return The state.
+   */
+  com.google.cloud.managedkafka.v1.Connector.State getState();
+
+  com.google.cloud.managedkafka.v1.Connector.RestartPolicyCase getRestartPolicyCase();
+}
diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectClusterRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectClusterRequest.java
new file mode 100644
index 000000000000..0854107d3a1a
--- /dev/null
+++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectClusterRequest.java
@@ -0,0 +1,1480 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto
+
+// Protobuf Java Version: 3.25.5
+package com.google.cloud.managedkafka.v1;
+
+/**
+ *
+ *
+ *
+ * Request for CreateConnectCluster.
+ * 
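+ *
+ * A non-normative example of assembling this request (identifiers are
+ * placeholders and the ConnectCluster configuration is elided):
+ * <pre>{@code
+ * CreateConnectClusterRequest request =
+ *     CreateConnectClusterRequest.newBuilder()
+ *         .setParent("projects/my-project/locations/us-central1")
+ *         .setConnectClusterId("my-connect-cluster")
+ *         .setConnectCluster(ConnectCluster.newBuilder().build())
+ *         .build();
+ * }</pre>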
+ *
+ * Protobuf type {@code google.cloud.managedkafka.v1.CreateConnectClusterRequest}
+ */
+public final class CreateConnectClusterRequest extends com.google.protobuf.GeneratedMessageV3
+    implements
+    // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.CreateConnectClusterRequest)
+    CreateConnectClusterRequestOrBuilder {
+  private static final long serialVersionUID = 0L;
+  // Use CreateConnectClusterRequest.newBuilder() to construct.
+  private CreateConnectClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+
+  private CreateConnectClusterRequest() {
+    parent_ = "";
+    connectClusterId_ = "";
+    requestId_ = "";
+  }
+
+  @java.lang.Override
+  @SuppressWarnings({"unused"})
+  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+    return new CreateConnectClusterRequest();
+  }
+
+  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+    return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto
+        .internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_descriptor;
+  }
+
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto
+        .internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.managedkafka.v1.CreateConnectClusterRequest.class,
+            com.google.cloud.managedkafka.v1.CreateConnectClusterRequest.Builder.class);
+  }
+
+  private int bitField0_;
+  public static final int PARENT_FIELD_NUMBER = 1;
+
+  @SuppressWarnings("serial")
+  private volatile java.lang.Object parent_ = "";
+  /**
+   *
+   *
+   *
+   * Required. The parent project/location in which to create the Kafka Connect
+   * cluster. Structured like
+   * `projects/{project}/locations/{location}/`.
+   * 
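+   *
+   * For example, with placeholder values filled in:
+   * `projects/my-project/locations/us-central1`.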
+   *
+   * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+   *
+   *
+   * @return The parent.
+   */
+  @java.lang.Override
+  public java.lang.String getParent() {
+    java.lang.Object ref = parent_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      parent_ = s;
+      return s;
+    }
+  }
+  /**
+   *
+   *
+   *
+   * Required. The parent project/location in which to create the Kafka Connect
+   * cluster. Structured like
+   * `projects/{project}/locations/{location}/`.
+   * 
+   *
+   * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+   *
+   *
+   * @return The bytes for parent.
+   */
+  @java.lang.Override
+  public com.google.protobuf.ByteString getParentBytes() {
+    java.lang.Object ref = parent_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+      parent_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int CONNECT_CLUSTER_ID_FIELD_NUMBER = 2;
+
+  @SuppressWarnings("serial")
+  private volatile java.lang.Object connectClusterId_ = "";
+  /**
+   *
+   *
+   *
+   * Required. The ID to use for the Connect cluster, which will become the
+   * final component of the cluster's name. The ID must be 1-63 characters long,
+   * and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply
+   * with RFC 1035.
+   *
+   * This value is structured like: `my-cluster-id`.
+   * 
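+   *
+   * As an illustration, a hypothetical ID such as `kafka-connect-1` matches this
+   * pattern, while an ID that starts with a digit or ends with a hyphen does not.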
+   *
+   * string connect_cluster_id = 2 [(.google.api.field_behavior) = REQUIRED];
+   *
+   * @return The connectClusterId.
+   */
+  @java.lang.Override
+  public java.lang.String getConnectClusterId() {
+    java.lang.Object ref = connectClusterId_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      connectClusterId_ = s;
+      return s;
+    }
+  }
+  /**
+   *
+   *
+   *
+   * Required. The ID to use for the Connect cluster, which will become the
+   * final component of the cluster's name. The ID must be 1-63 characters long,
+   * and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply
+   * with RFC 1035.
+   *
+   * This value is structured like: `my-cluster-id`.
+   * 
+   *
+   * string connect_cluster_id = 2 [(.google.api.field_behavior) = REQUIRED];
+   *
+   * @return The bytes for connectClusterId.
+   */
+  @java.lang.Override
+  public com.google.protobuf.ByteString getConnectClusterIdBytes() {
+    java.lang.Object ref = connectClusterId_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+      connectClusterId_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int CONNECT_CLUSTER_FIELD_NUMBER = 3;
+  private com.google.cloud.managedkafka.v1.ConnectCluster connectCluster_;
+  /**
+   *
+   *
+   *
+   * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+   * field is ignored.
+   * 
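+   *
+   * Illustrative note: because `name` is ignored here, a caller can leave it
+   * unset on the ConnectCluster message it passes in.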
+   *
+   *
+   * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+   *
+   *
+   * @return Whether the connectCluster field is set.
+   */
+  @java.lang.Override
+  public boolean hasConnectCluster() {
+    return ((bitField0_ & 0x00000001) != 0);
+  }
+  /**
+   *
+   *
+   *
+   * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+   * field is ignored.
+   * 
+   *
+   *
+   * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+   *
+   *
+   * @return The connectCluster.
+   */
+  @java.lang.Override
+  public com.google.cloud.managedkafka.v1.ConnectCluster getConnectCluster() {
+    return connectCluster_ == null
+        ? com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance()
+        : connectCluster_;
+  }
+  /**
+   *
+   *
+   *
+   * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+   * field is ignored.
+   * 
+   *
+   *
+   * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+   *
+   */
+  @java.lang.Override
+  public com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder getConnectClusterOrBuilder() {
+    return connectCluster_ == null
+        ? com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance()
+        : connectCluster_;
+  }
+
+  public static final int REQUEST_ID_FIELD_NUMBER = 4;
+
+  @SuppressWarnings("serial")
+  private volatile java.lang.Object requestId_ = "";
+  /**
+   *
+   *
+   *
+   * Optional. An optional request ID to identify requests. Specify a unique
+   * request ID to avoid duplication of requests. If a request times out or
+   * fails, retrying with the same ID allows the server to recognize the
+   * previous attempt. For at least 60 minutes, the server ignores duplicate
+   * requests bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and the
+   * request times out. If you make the request again with the same request ID
+   * within 60 minutes of the last request, the server checks if an original
+   * operation with the same request ID was received. If so, the server ignores
+   * the second request.
+   *
+   * The request ID must be a valid UUID. A zero UUID is not supported
+   * (00000000-0000-0000-0000-000000000000).
+   * 
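+   *
+   * A hypothetical way to generate a conforming ID with the JDK:
+   * <pre>{@code
+   * String requestId = java.util.UUID.randomUUID().toString();
+   * }</pre>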
+   *
+   * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... }
+   *
+   *
+   * @return The requestId.
+   */
+  @java.lang.Override
+  public java.lang.String getRequestId() {
+    java.lang.Object ref = requestId_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      requestId_ = s;
+      return s;
+    }
+  }
+  /**
+   *
+   *
+   *
+   * Optional. An optional request ID to identify requests. Specify a unique
+   * request ID to avoid duplication of requests. If a request times out or
+   * fails, retrying with the same ID allows the server to recognize the
+   * previous attempt. For at least 60 minutes, the server ignores duplicate
+   * requests bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and the
+   * request times out. If you make the request again with the same request ID
+   * within 60 minutes of the last request, the server checks if an original
+   * operation with the same request ID was received. If so, the server ignores
+   * the second request.
+   *
+   * The request ID must be a valid UUID. A zero UUID is not supported
+   * (00000000-0000-0000-0000-000000000000).
+   * 
+   *
+   * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... }
+   *
+   *
+   * @return The bytes for requestId.
+   */
+  @java.lang.Override
+  public com.google.protobuf.ByteString getRequestIdBytes() {
+    java.lang.Object ref = requestId_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+      requestId_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  private byte memoizedIsInitialized = -1;
+
+  @java.lang.Override
+  public final boolean isInitialized() {
+    byte isInitialized = memoizedIsInitialized;
+    if (isInitialized == 1) return true;
+    if (isInitialized == 0) return false;
+
+    memoizedIsInitialized = 1;
+    return true;
+  }
+
+  @java.lang.Override
+  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_);
+    }
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(connectClusterId_)) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 2, connectClusterId_);
+    }
+    if (((bitField0_ & 0x00000001) != 0)) {
+      output.writeMessage(3, getConnectCluster());
+    }
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(requestId_)) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 4, requestId_);
+    }
+    getUnknownFields().writeTo(output);
+  }
+
+  @java.lang.Override
+  public int getSerializedSize() {
+    int size = memoizedSize;
+    if (size != -1) return size;
+
+    size = 0;
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_);
+    }
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(connectClusterId_)) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, connectClusterId_);
+    }
+    if (((bitField0_ & 0x00000001) != 0)) {
+      size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getConnectCluster());
+    }
+    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(requestId_)) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, requestId_);
+    }
+    size += getUnknownFields().getSerializedSize();
+    memoizedSize = size;
+    return size;
+  }
+
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (!(obj instanceof com.google.cloud.managedkafka.v1.CreateConnectClusterRequest)) {
+      return super.equals(obj);
+    }
+    com.google.cloud.managedkafka.v1.CreateConnectClusterRequest other =
+        (com.google.cloud.managedkafka.v1.CreateConnectClusterRequest) obj;
+
+    if (!getParent().equals(other.getParent())) return false;
+    if (!getConnectClusterId().equals(other.getConnectClusterId())) return false;
+    if (hasConnectCluster() != other.hasConnectCluster()) return false;
+    if (hasConnectCluster()) {
+      if (!getConnectCluster().equals(other.getConnectCluster())) return false;
+    }
+    if (!getRequestId().equals(other.getRequestId())) return false;
+    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
+    return true;
+  }
+
+  @java.lang.Override
+  public int hashCode() {
+    if (memoizedHashCode != 0) {
+      return memoizedHashCode;
+    }
+    int hash = 41;
+    hash = (19 * hash) + getDescriptor().hashCode();
+    hash = (37 * hash) + PARENT_FIELD_NUMBER;
+    hash = (53 * hash) + getParent().hashCode();
+    hash = (37 * hash) + CONNECT_CLUSTER_ID_FIELD_NUMBER;
+    hash = (53 * hash) + getConnectClusterId().hashCode();
+    if (hasConnectCluster()) {
+      hash = (37 * hash) + CONNECT_CLUSTER_FIELD_NUMBER;
+      hash = (53 * hash) + getConnectCluster().hashCode();
+    }
+    hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER;
+    hash = (53 * hash) + getRequestId().hashCode();
+    hash = (29 * hash) + getUnknownFields().hashCode();
+    memoizedHashCode = hash;
+    return hash;
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(
+      java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(
+      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(
+      com.google.protobuf.ByteString data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(
+      com.google.protobuf.ByteString data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(byte[] data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(
+      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(
+      java.io.InputStream input) throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(
+      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+        PARSER, input, extensionRegistry);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseDelimitedFrom(
+      java.io.InputStream input) throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseDelimitedFrom(
+      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+        PARSER, input, extensionRegistry);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(
+      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+  }
+
+  public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest parseFrom(
+      com.google.protobuf.CodedInputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+        PARSER, input, extensionRegistry);
+  }
+
+  @java.lang.Override
+  public Builder newBuilderForType() {
+    return newBuilder();
+  }
+
+  public static Builder newBuilder() {
+    return DEFAULT_INSTANCE.toBuilder();
+  }
+
+  public static Builder newBuilder(
+      com.google.cloud.managedkafka.v1.CreateConnectClusterRequest prototype) {
+    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+  }
+
+  @java.lang.Override
+  public Builder toBuilder() {
+    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+  }
+
+  @java.lang.Override
+  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+    Builder builder = new Builder(parent);
+    return builder;
+  }
+  /**
+   *
+   *
+   *
+   * Request for CreateConnectCluster.
+   * 
+   *
+   * Protobuf type {@code google.cloud.managedkafka.v1.CreateConnectClusterRequest}
+   */
+  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
+      implements
+      // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.CreateConnectClusterRequest)
+      com.google.cloud.managedkafka.v1.CreateConnectClusterRequestOrBuilder {
+    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+      return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto
+          .internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_descriptor;
+    }
+
+    @java.lang.Override
+    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto
+          .internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              com.google.cloud.managedkafka.v1.CreateConnectClusterRequest.class,
+              com.google.cloud.managedkafka.v1.CreateConnectClusterRequest.Builder.class);
+    }
+
+    // Construct using com.google.cloud.managedkafka.v1.CreateConnectClusterRequest.newBuilder()
+    private Builder() {
+      maybeForceBuilderInitialization();
+    }
+
+    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      super(parent);
+      maybeForceBuilderInitialization();
+    }
+
+    private void maybeForceBuilderInitialization() {
+      if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
+        getConnectClusterFieldBuilder();
+      }
+    }
+
+    @java.lang.Override
+    public Builder clear() {
+      super.clear();
+      bitField0_ = 0;
+      parent_ = "";
+      connectClusterId_ = "";
+      connectCluster_ = null;
+      if (connectClusterBuilder_ != null) {
+        connectClusterBuilder_.dispose();
+        connectClusterBuilder_ = null;
+      }
+      requestId_ = "";
+      return this;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+      return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto
+          .internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_descriptor;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.managedkafka.v1.CreateConnectClusterRequest
+        getDefaultInstanceForType() {
+      return com.google.cloud.managedkafka.v1.CreateConnectClusterRequest.getDefaultInstance();
+    }
+
+    @java.lang.Override
+    public com.google.cloud.managedkafka.v1.CreateConnectClusterRequest build() {
+      com.google.cloud.managedkafka.v1.CreateConnectClusterRequest result = buildPartial();
+      if (!result.isInitialized()) {
+        throw newUninitializedMessageException(result);
+      }
+      return result;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.managedkafka.v1.CreateConnectClusterRequest buildPartial() {
+      com.google.cloud.managedkafka.v1.CreateConnectClusterRequest result =
+          new com.google.cloud.managedkafka.v1.CreateConnectClusterRequest(this);
+      if (bitField0_ != 0) {
+        buildPartial0(result);
+      }
+      onBuilt();
+      return result;
+    }
+
+    private void buildPartial0(
+        com.google.cloud.managedkafka.v1.CreateConnectClusterRequest result) {
+      int from_bitField0_ = bitField0_;
+      if (((from_bitField0_ & 0x00000001) != 0)) {
+        result.parent_ = parent_;
+      }
+      if (((from_bitField0_ & 0x00000002) != 0)) {
+        result.connectClusterId_ = connectClusterId_;
+      }
+      int to_bitField0_ = 0;
+      if (((from_bitField0_ & 0x00000004) != 0)) {
+        result.connectCluster_ =
+            connectClusterBuilder_ == null ? connectCluster_ : connectClusterBuilder_.build();
+        to_bitField0_ |= 0x00000001;
+      }
+      if (((from_bitField0_ & 0x00000008) != 0)) {
+        result.requestId_ = requestId_;
+      }
+      result.bitField0_ |= to_bitField0_;
+    }
+
+    @java.lang.Override
+    public Builder clone() {
+      return super.clone();
+    }
+
+    @java.lang.Override
+    public Builder setField(
+        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+      return super.setField(field, value);
+    }
+
+    @java.lang.Override
+    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+      return super.clearField(field);
+    }
+
+    @java.lang.Override
+    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+      return super.clearOneof(oneof);
+    }
+
+    @java.lang.Override
+    public Builder setRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+      return super.setRepeatedField(field, index, value);
+    }
+
+    @java.lang.Override
+    public Builder addRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+      return super.addRepeatedField(field, value);
+    }
+
+    @java.lang.Override
+    public Builder mergeFrom(com.google.protobuf.Message other) {
+      if (other instanceof com.google.cloud.managedkafka.v1.CreateConnectClusterRequest) {
+        return mergeFrom((com.google.cloud.managedkafka.v1.CreateConnectClusterRequest) other);
+      } else {
+        super.mergeFrom(other);
+        return this;
+      }
+    }
+
+    public Builder mergeFrom(com.google.cloud.managedkafka.v1.CreateConnectClusterRequest other) {
+      if (other
+          == com.google.cloud.managedkafka.v1.CreateConnectClusterRequest.getDefaultInstance())
+        return this;
+      if (!other.getParent().isEmpty()) {
+        parent_ = other.parent_;
+        bitField0_ |= 0x00000001;
+        onChanged();
+      }
+      if (!other.getConnectClusterId().isEmpty()) {
+        connectClusterId_ = other.connectClusterId_;
+        bitField0_ |= 0x00000002;
+        onChanged();
+      }
+      if (other.hasConnectCluster()) {
+        mergeConnectCluster(other.getConnectCluster());
+      }
+      if (!other.getRequestId().isEmpty()) {
+        requestId_ = other.requestId_;
+        bitField0_ |= 0x00000008;
+        onChanged();
+      }
+      this.mergeUnknownFields(other.getUnknownFields());
+      onChanged();
+      return this;
+    }
+
+    @java.lang.Override
+    public final boolean isInitialized() {
+      return true;
+    }
+
+    @java.lang.Override
+    public Builder mergeFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      if (extensionRegistry == null) {
+        throw new java.lang.NullPointerException();
+      }
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            case 10:
+              {
+                parent_ = input.readStringRequireUtf8();
+                bitField0_ |= 0x00000001;
+                break;
+              } // case 10
+            case 18:
+              {
+                connectClusterId_ = input.readStringRequireUtf8();
+                bitField0_ |= 0x00000002;
+                break;
+              } // case 18
+            case 26:
+              {
+                input.readMessage(getConnectClusterFieldBuilder().getBuilder(), extensionRegistry);
+                bitField0_ |= 0x00000004;
+                break;
+              } // case 26
+            case 34:
+              {
+                requestId_ = input.readStringRequireUtf8();
+                bitField0_ |= 0x00000008;
+                break;
+              } // case 34
+            default:
+              {
+                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
+                  done = true; // was an endgroup tag
+                }
+                break;
+              } // default:
+          } // switch (tag)
+        } // while (!done)
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.unwrapIOException();
+      } finally {
+        onChanged();
+      } // finally
+      return this;
+    }
+
+    private int bitField0_;
+
+    private java.lang.Object parent_ = "";
+    /**
+     *
+     *
+     *
+     * Required. The parent project/location in which to create the Kafka Connect
+     * cluster. Structured like
+     * `projects/{project}/locations/{location}/`.
+     * 
+     *
+     * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+     *
+     *
+     * @return The parent.
+     */
+    public java.lang.String getParent() {
+      java.lang.Object ref = parent_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        parent_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Required. The parent project/location in which to create the Kafka Connect
+     * cluster. Structured like
+     * `projects/{project}/locations/{location}/`.
+     * 
+     *
+     * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+     *
+     *
+     * @return The bytes for parent.
+     */
+    public com.google.protobuf.ByteString getParentBytes() {
+      java.lang.Object ref = parent_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+        parent_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Required. The parent project/location in which to create the Kafka Connect
+     * cluster. Structured like
+     * `projects/{project}/locations/{location}/`.
+     * 
+     *
+     * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+     *
+     *
+     * @param value The parent to set.
+     * @return This builder for chaining.
+     */
+    public Builder setParent(java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      parent_ = value;
+      bitField0_ |= 0x00000001;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The parent project/location in which to create the Kafka Connect
+     * cluster. Structured like
+     * `projects/{project}/locations/{location}/`.
+     * 
+     *
+     * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+     *
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearParent() {
+      parent_ = getDefaultInstance().getParent();
+      bitField0_ = (bitField0_ & ~0x00000001);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The parent project/location in which to create the Kafka Connect
+     * cluster. Structured like
+     * `projects/{project}/locations/{location}/`.
+     * 
+     *
+     * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+     *
+     *
+     * @param value The bytes for parent to set.
+     * @return This builder for chaining.
+     */
+    public Builder setParentBytes(com.google.protobuf.ByteString value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      checkByteStringIsUtf8(value);
+      parent_ = value;
+      bitField0_ |= 0x00000001;
+      onChanged();
+      return this;
+    }
+
+    private java.lang.Object connectClusterId_ = "";
+    /**
+     *
+     *
+     *
+     * Required. The ID to use for the Connect cluster, which will become the
+     * final component of the cluster's name. The ID must be 1-63 characters long,
+     * and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply
+     * with RFC 1035.
+     *
+     * This value is structured like: `my-cluster-id`.
+     * 
+     *
+     * string connect_cluster_id = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @return The connectClusterId.
+     */
+    public java.lang.String getConnectClusterId() {
+      java.lang.Object ref = connectClusterId_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        connectClusterId_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Required. The ID to use for the Connect cluster, which will become the
+     * final component of the cluster's name. The ID must be 1-63 characters long,
+     * and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply
+     * with RFC 1035.
+     *
+     * This value is structured like: `my-cluster-id`.
+     * 
+     *
+     * string connect_cluster_id = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @return The bytes for connectClusterId.
+     */
+    public com.google.protobuf.ByteString getConnectClusterIdBytes() {
+      java.lang.Object ref = connectClusterId_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+        connectClusterId_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Required. The ID to use for the Connect cluster, which will become the
+     * final component of the cluster's name. The ID must be 1-63 characters long,
+     * and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply
+     * with RFC 1035.
+     *
+     * This value is structured like: `my-cluster-id`.
+     * 
+     *
+     * string connect_cluster_id = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @param value The connectClusterId to set.
+     * @return This builder for chaining.
+     */
+    public Builder setConnectClusterId(java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      connectClusterId_ = value;
+      bitField0_ |= 0x00000002;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The ID to use for the Connect cluster, which will become the
+     * final component of the cluster's name. The ID must be 1-63 characters long,
+     * and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply
+     * with RFC 1035.
+     *
+     * This value is structured like: `my-cluster-id`.
+     * 
+     *
+     * string connect_cluster_id = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @return This builder for chaining.
+     */
+    public Builder clearConnectClusterId() {
+      connectClusterId_ = getDefaultInstance().getConnectClusterId();
+      bitField0_ = (bitField0_ & ~0x00000002);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. The ID to use for the Connect cluster, which will become the
+     * final component of the cluster's name. The ID must be 1-63 characters long,
+     * and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply
+     * with RFC 1035.
+     *
+     * This value is structured like: `my-cluster-id`.
+     * 
+     *
+     * string connect_cluster_id = 2 [(.google.api.field_behavior) = REQUIRED];
+     *
+     * @param value The bytes for connectClusterId to set.
+     * @return This builder for chaining.
+     */
+    public Builder setConnectClusterIdBytes(com.google.protobuf.ByteString value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      checkByteStringIsUtf8(value);
+      connectClusterId_ = value;
+      bitField0_ |= 0x00000002;
+      onChanged();
+      return this;
+    }
+
+    private com.google.cloud.managedkafka.v1.ConnectCluster connectCluster_;
+    private com.google.protobuf.SingleFieldBuilderV3<
+            com.google.cloud.managedkafka.v1.ConnectCluster,
+            com.google.cloud.managedkafka.v1.ConnectCluster.Builder,
+            com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder>
+        connectClusterBuilder_;
+    /**
+     *
+     *
+     *
+     * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+     * field is ignored.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+     *
+     *
+     * @return Whether the connectCluster field is set.
+     */
+    public boolean hasConnectCluster() {
+      return ((bitField0_ & 0x00000004) != 0);
+    }
+    /**
+     *
+     *
+     *
+     * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+     * field is ignored.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+     *
+     *
+     * @return The connectCluster.
+     */
+    public com.google.cloud.managedkafka.v1.ConnectCluster getConnectCluster() {
+      if (connectClusterBuilder_ == null) {
+        return connectCluster_ == null
+            ? com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance()
+            : connectCluster_;
+      } else {
+        return connectClusterBuilder_.getMessage();
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+     * field is ignored.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+     *
+     */
+    public Builder setConnectCluster(com.google.cloud.managedkafka.v1.ConnectCluster value) {
+      if (connectClusterBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        connectCluster_ = value;
+      } else {
+        connectClusterBuilder_.setMessage(value);
+      }
+      bitField0_ |= 0x00000004;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+     * field is ignored.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+     *
+     */
+    public Builder setConnectCluster(
+        com.google.cloud.managedkafka.v1.ConnectCluster.Builder builderForValue) {
+      if (connectClusterBuilder_ == null) {
+        connectCluster_ = builderForValue.build();
+      } else {
+        connectClusterBuilder_.setMessage(builderForValue.build());
+      }
+      bitField0_ |= 0x00000004;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+     * field is ignored.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+     *
+     */
+    public Builder mergeConnectCluster(com.google.cloud.managedkafka.v1.ConnectCluster value) {
+      if (connectClusterBuilder_ == null) {
+        if (((bitField0_ & 0x00000004) != 0)
+            && connectCluster_ != null
+            && connectCluster_
+                != com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance()) {
+          getConnectClusterBuilder().mergeFrom(value);
+        } else {
+          connectCluster_ = value;
+        }
+      } else {
+        connectClusterBuilder_.mergeFrom(value);
+      }
+      if (connectCluster_ != null) {
+        bitField0_ |= 0x00000004;
+        onChanged();
+      }
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+     * field is ignored.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+     *
+     */
+    public Builder clearConnectCluster() {
+      bitField0_ = (bitField0_ & ~0x00000004);
+      connectCluster_ = null;
+      if (connectClusterBuilder_ != null) {
+        connectClusterBuilder_.dispose();
+        connectClusterBuilder_ = null;
+      }
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     *
+     *
+     * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+     * field is ignored.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+     *
+     */
+    public com.google.cloud.managedkafka.v1.ConnectCluster.Builder getConnectClusterBuilder() {
+      bitField0_ |= 0x00000004;
+      onChanged();
+      return getConnectClusterFieldBuilder().getBuilder();
+    }
+    /**
+     *
+     *
+     *
+     * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+     * field is ignored.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+     *
+     */
+    public com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder getConnectClusterOrBuilder() {
+      if (connectClusterBuilder_ != null) {
+        return connectClusterBuilder_.getMessageOrBuilder();
+      } else {
+        return connectCluster_ == null
+            ? com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance()
+            : connectCluster_;
+      }
+    }
+    /**
+     *
+     *
+     *
+     * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+     * field is ignored.
+     * 
+     *
+     *
+     * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED];
+     *
+     */
+    private com.google.protobuf.SingleFieldBuilderV3<
+            com.google.cloud.managedkafka.v1.ConnectCluster,
+            com.google.cloud.managedkafka.v1.ConnectCluster.Builder,
+            com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder>
+        getConnectClusterFieldBuilder() {
+      if (connectClusterBuilder_ == null) {
+        connectClusterBuilder_ =
+            new com.google.protobuf.SingleFieldBuilderV3<
+                com.google.cloud.managedkafka.v1.ConnectCluster,
+                com.google.cloud.managedkafka.v1.ConnectCluster.Builder,
+                com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder>(
+                getConnectCluster(), getParentForChildren(), isClean());
+        connectCluster_ = null;
+      }
+      return connectClusterBuilder_;
+    }
+
+    private java.lang.Object requestId_ = "";
+    /**
+     *
+     *
+     *
+     * Optional. An optional request ID to identify requests. Specify a unique
+     * request ID to avoid duplication of requests. If a request times out or
+     * fails, retrying with the same ID allows the server to recognize the
+     * previous attempt. For at least 60 minutes, the server ignores duplicate
+     * requests bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and the
+     * request times out. If you make the request again with the same request ID
+     * within 60 minutes of the last request, the server checks if an original
+     * operation with the same request ID was received. If so, the server ignores
+     * the second request.
+     *
+     * The request ID must be a valid UUID. A zero UUID is not supported
+     * (00000000-0000-0000-0000-000000000000).
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
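
Since the same request-ID contract applies to every mutating call in this service, a short sketch of how a caller might populate it; the project, location, and cluster IDs are placeholders, and the empty `ConnectCluster` stands in for real cluster configuration:

```java
import com.google.cloud.managedkafka.v1.ConnectCluster;
import com.google.cloud.managedkafka.v1.CreateConnectClusterRequest;
import java.util.UUID;

public class IdempotentCreateSketch {
  public static void main(String[] args) {
    // One UUID per logical operation; reuse it verbatim on every retry so the
    // server can recognize duplicates for at least 60 minutes.
    String requestId = UUID.randomUUID().toString();

    CreateConnectClusterRequest request =
        CreateConnectClusterRequest.newBuilder()
            .setParent("projects/my-project/locations/us-central1")
            .setConnectClusterId("my-connect-cluster")
            // Placeholder: a real cluster needs capacity and network config.
            .setConnectCluster(ConnectCluster.getDefaultInstance())
            .setRequestId(requestId)
            .build();

    System.out.println(request.getRequestId());
  }
}
```
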
+     * Optional. A request ID to identify requests. Specify a unique
+     * request ID to avoid duplication of requests. If a request times out or
+     * fails, retrying with the same ID allows the server to recognize the
+     * previous attempt. For at least 60 minutes, the server ignores duplicate
+     * requests bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and the
+     * request times out. If you make the request again with the same request ID
+     * within 60 minutes of the last request, the server checks if an original
+     * operation with the same request ID was received. If so, the server ignores
+     * the second request.
+     *
+     * The request ID must be a valid UUID. A zero UUID is not supported
+     * (00000000-0000-0000-0000-000000000000).
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. A request ID to identify requests. Specify a unique
+     * request ID to avoid duplication of requests. If a request times out or
+     * fails, retrying with the same ID allows the server to recognize the
+     * previous attempt. For at least 60 minutes, the server ignores duplicate
+     * requests bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and the
+     * request times out. If you make the request again with the same request ID
+     * within 60 minutes of the last request, the server checks if an original
+     * operation with the same request ID was received. If so, the server ignores
+     * the second request.
+     *
+     * The request ID must be a valid UUID. A zero UUID is not supported
+     * (00000000-0000-0000-0000-000000000000).
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A request ID to identify requests. Specify a unique
+     * request ID to avoid duplication of requests. If a request times out or
+     * fails, retrying with the same ID allows the server to recognize the
+     * previous attempt. For at least 60 minutes, the server ignores duplicate
+     * requests bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and the
+     * request times out. If you make the request again with the same request ID
+     * within 60 minutes of the last request, the server checks if an original
+     * operation with the same request ID was received. If so, the server ignores
+     * the second request.
+     *
+     * The request ID must be a valid UUID. A zero UUID is not supported
+     * (00000000-0000-0000-0000-000000000000).
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A request ID to identify requests. Specify a unique
+     * request ID to avoid duplication of requests. If a request times out or
+     * fails, retrying with the same ID allows the server to recognize the
+     * previous attempt. For at least 60 minutes, the server ignores duplicate
+     * requests bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and the
+     * request times out. If you make the request again with the same request ID
+     * within 60 minutes of the last request, the server checks if an original
+     * operation with the same request ID was received. If so, the server ignores
+     * the second request.
+     *
+     * The request ID must be a valid UUID. A zero UUID is not supported
+     * (00000000-0000-0000-0000-000000000000).
+     * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.CreateConnectClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.CreateConnectClusterRequest) + private static final com.google.cloud.managedkafka.v1.CreateConnectClusterRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.CreateConnectClusterRequest(); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateConnectClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.CreateConnectClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectClusterRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectClusterRequestOrBuilder.java new file mode 100644 index 000000000000..5ff35b7dd03d --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectClusterRequestOrBuilder.java @@ -0,0 +1,193 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface CreateConnectClusterRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.CreateConnectClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The parent project/location in which to create the Kafka Connect
+   * cluster. Structured like
+   * `projects/{project}/locations/{location}/`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
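
The parent string documented above can be assembled directly; GAPIC surfaces usually also generate a `LocationName` helper for this resource pattern, though that class is an assumption here and worth verifying in the package before relying on it:

```java
public class ParentNameSketch {
  public static void main(String[] args) {
    // Plain string formatting, always available:
    String parent = String.format("projects/%s/locations/%s", "my-project", "us-central1");
    System.out.println(parent);

    // Assumed GAPIC helper (verify it exists in com.google.cloud.managedkafka.v1):
    // String parent = LocationName.of("my-project", "us-central1").toString();
  }
}
```
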
+   * Required. The parent project/location in which to create the Kafka Connect
+   * cluster. Structured like
+   * `projects/{project}/locations/{location}/`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The ID to use for the Connect cluster, which will become the
+   * final component of the cluster's name. The ID must be 1-63 characters long,
+   * and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply
+   * with RFC 1035.
+   *
+   * This value is structured like: `my-cluster-id`.
+   * 
+ * + * string connect_cluster_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The connectClusterId. + */ + java.lang.String getConnectClusterId(); + /** + * + * + *
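
The ID rules above are mechanical enough to pre-validate on the client side; a minimal check built from the documented regular expression and length bounds:

```java
import java.util.regex.Pattern;

public class ConnectClusterIdCheck {
  // Pattern from the field documentation: [a-z]([-a-z0-9]*[a-z0-9])?
  private static final Pattern ID_PATTERN = Pattern.compile("[a-z]([-a-z0-9]*[a-z0-9])?");

  static boolean isValidId(String id) {
    // The documentation also bounds the length to 1-63 characters.
    return id.length() >= 1 && id.length() <= 63 && ID_PATTERN.matcher(id).matches();
  }

  public static void main(String[] args) {
    System.out.println(isValidId("my-cluster-id")); // true
    System.out.println(isValidId("My_Cluster"));    // false: uppercase and underscore
    System.out.println(isValidId("a-"));            // false: cannot end with a hyphen
  }
}
```
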
+   * Required. The ID to use for the Connect cluster, which will become the
+   * final component of the cluster's name. The ID must be 1-63 characters long,
+   * and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply
+   * with RFC 1035.
+   *
+   * This value is structured like: `my-cluster-id`.
+   * 
+ * + * string connect_cluster_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for connectClusterId. + */ + com.google.protobuf.ByteString getConnectClusterIdBytes(); + + /** + * + * + *
+   * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+   * field is ignored.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the connectCluster field is set. + */ + boolean hasConnectCluster(); + /** + * + * + *
+   * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+   * field is ignored.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The connectCluster. + */ + com.google.cloud.managedkafka.v1.ConnectCluster getConnectCluster(); + /** + * + * + *
+   * Required. Configuration of the Kafka Connect cluster to create. Its `name`
+   * field is ignored.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder getConnectClusterOrBuilder(); + + /** + * + * + *
+   * Optional. A request ID to identify requests. Specify a unique
+   * request ID to avoid duplication of requests. If a request times out or
+   * fails, retrying with the same ID allows the server to recognize the
+   * previous attempt. For at least 60 minutes, the server ignores duplicate
+   * requests bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and the
+   * request times out. If you make the request again with the same request ID
+   * within 60 minutes of the last request, the server checks if an original
+   * operation with the same request ID was received. If so, the server ignores
+   * the second request.
+   *
+   * The request ID must be a valid UUID. A zero UUID is not supported
+   * (00000000-0000-0000-0000-000000000000).
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + /** + * + * + *
+   * Optional. A request ID to identify requests. Specify a unique
+   * request ID to avoid duplication of requests. If a request times out or
+   * fails, retrying with the same ID allows the server to recognize the
+   * previous attempt. For at least 60 minutes, the server ignores duplicate
+   * requests bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and the
+   * request times out. If you make the request again with the same request ID
+   * within 60 minutes of the last request, the server checks if an original
+   * operation with the same request ID was received. If so, the server ignores
+   * the second request.
+   *
+   * The request ID must be a valid UUID. A zero UUID is not supported
+   * (00000000-0000-0000-0000-000000000000).
+   * 
+ * + * + * string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectorRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectorRequest.java new file mode 100644 index 000000000000..75dda2ce5e2a --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectorRequest.java @@ -0,0 +1,1175 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * Request for CreateConnector.
+ * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.CreateConnectorRequest} + */ +public final class CreateConnectorRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.CreateConnectorRequest) + CreateConnectorRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use CreateConnectorRequest.newBuilder() to construct. + private CreateConnectorRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateConnectorRequest() { + parent_ = ""; + connectorId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateConnectorRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.CreateConnectorRequest.class, + com.google.cloud.managedkafka.v1.CreateConnectorRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + /** + * + * + *
+   * Required. The parent Connect cluster in which to create the connector.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The parent Connect cluster in which to create the connector.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CONNECTOR_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object connectorId_ = ""; + /** + * + * + *
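
As the paired accessors show, the generated code stores each string field as a single `Object` and lazily converts between `String` and `ByteString`, caching whichever form was requested last. A quick sketch showing the two views stay equivalent:

```java
import com.google.cloud.managedkafka.v1.CreateConnectorRequest;

public class LazyBytesSketch {
  public static void main(String[] args) {
    CreateConnectorRequest request =
        CreateConnectorRequest.newBuilder()
            .setParent("projects/p/locations/l/connectClusters/c")
            .build();

    // getParentBytes() encodes to UTF-8 once and caches the ByteString; the
    // two views of the field always agree.
    System.out.println(
        request.getParentBytes().toStringUtf8().equals(request.getParent())); // true
  }
}
```
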
+   * Required. The ID to use for the connector, which will become the final
+   * component of the connector's name. The ID must be 1-63 characters long, and
+   * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
+   * RFC 1035.
+   *
+   * This value is structured like: `my-connector-id`.
+   * 
+ * + * string connector_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The connectorId. + */ + @java.lang.Override + public java.lang.String getConnectorId() { + java.lang.Object ref = connectorId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + connectorId_ = s; + return s; + } + } + /** + * + * + *
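
Putting the three required fields together; the resource IDs are placeholders, and the empty `Connector` stands in for a real connector configuration:

```java
import com.google.cloud.managedkafka.v1.Connector;
import com.google.cloud.managedkafka.v1.CreateConnectorRequest;

public class CreateConnectorRequestSketch {
  public static void main(String[] args) {
    String parent = String.format(
        "projects/%s/locations/%s/connectClusters/%s",
        "my-project", "us-central1", "my-connect-cluster");

    CreateConnectorRequest request =
        CreateConnectorRequest.newBuilder()
            .setParent(parent)
            .setConnectorId("my-connector-id")
            // Placeholder: a real connector needs its runtime configuration.
            .setConnector(Connector.getDefaultInstance())
            .build();

    System.out.println(request);
  }
}
```
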
+   * Required. The ID to use for the connector, which will become the final
+   * component of the connector's name. The ID must be 1-63 characters long, and
+   * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
+   * RFC 1035.
+   *
+   * This value is structured like: `my-connector-id`.
+   * 
+ * + * string connector_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for connectorId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getConnectorIdBytes() { + java.lang.Object ref = connectorId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + connectorId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CONNECTOR_FIELD_NUMBER = 3; + private com.google.cloud.managedkafka.v1.Connector connector_; + /** + * + * + *
+   * Required. The connector to create.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the connector field is set. + */ + @java.lang.Override + public boolean hasConnector() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
+   * Required. The connector to create.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The connector. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.Connector getConnector() { + return connector_ == null + ? com.google.cloud.managedkafka.v1.Connector.getDefaultInstance() + : connector_; + } + /** + * + * + *
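
Message-typed fields like `connector` carry explicit presence, so `getConnector()` never returns null; a quick sketch of the distinction:

```java
import com.google.cloud.managedkafka.v1.Connector;
import com.google.cloud.managedkafka.v1.CreateConnectorRequest;

public class PresenceSketch {
  public static void main(String[] args) {
    CreateConnectorRequest empty = CreateConnectorRequest.getDefaultInstance();

    // Unset message fields report false here ...
    System.out.println(empty.hasConnector()); // false
    // ... but the getter still returns the shared default instance, not null.
    System.out.println(empty.getConnector() == Connector.getDefaultInstance()); // true
  }
}
```
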
+   * Required. The connector to create.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectorOrBuilder getConnectorOrBuilder() { + return connector_ == null + ? com.google.cloud.managedkafka.v1.Connector.getDefaultInstance() + : connector_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(connectorId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, connectorId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getConnector()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(connectorId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, connectorId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getConnector()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.CreateConnectorRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.CreateConnectorRequest other = + (com.google.cloud.managedkafka.v1.CreateConnectorRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getConnectorId().equals(other.getConnectorId())) return false; + if (hasConnector() != other.hasConnector()) return false; + if (hasConnector()) { + if (!getConnector().equals(other.getConnector())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + CONNECTOR_ID_FIELD_NUMBER; + hash = (53 * hash) + getConnectorId().hashCode(); + if (hasConnector()) { + hash = (37 * hash) + CONNECTOR_FIELD_NUMBER; + hash = (53 * hash) + getConnector().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.CreateConnectorRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
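
The machinery above (`toBuilder`, the `parseFrom` overloads, and value-based `equals`/`hashCode`) composes into the usual copy-modify and wire round-trip patterns:

```java
import com.google.cloud.managedkafka.v1.CreateConnectorRequest;

public class RoundTripSketch {
  public static void main(String[] args) throws Exception {
    CreateConnectorRequest original =
        CreateConnectorRequest.newBuilder()
            .setParent("projects/p/locations/l/connectClusters/c")
            .setConnectorId("my-connector-id")
            .build();

    // toBuilder() copies the message into a fresh builder for modification.
    CreateConnectorRequest renamed =
        original.toBuilder().setConnectorId("other-connector-id").build();

    // writeTo()/parseFrom() round-trip losslessly; equals() compares by value.
    CreateConnectorRequest reparsed =
        CreateConnectorRequest.parseFrom(original.toByteArray());
    System.out.println(reparsed.equals(original)); // true
    System.out.println(renamed.equals(original));  // false
  }
}
```
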
+   * Request for CreateConnector.
+   * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.CreateConnectorRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.CreateConnectorRequest) + com.google.cloud.managedkafka.v1.CreateConnectorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.CreateConnectorRequest.class, + com.google.cloud.managedkafka.v1.CreateConnectorRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.CreateConnectorRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getConnectorFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + connectorId_ = ""; + connector_ = null; + if (connectorBuilder_ != null) { + connectorBuilder_.dispose(); + connectorBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.CreateConnectorRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.CreateConnectorRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.CreateConnectorRequest build() { + com.google.cloud.managedkafka.v1.CreateConnectorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.CreateConnectorRequest buildPartial() { + com.google.cloud.managedkafka.v1.CreateConnectorRequest result = + new com.google.cloud.managedkafka.v1.CreateConnectorRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.CreateConnectorRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.connectorId_ = connectorId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.connector_ = connectorBuilder_ == null ? 
connector_ : connectorBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.CreateConnectorRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.CreateConnectorRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.CreateConnectorRequest other) { + if (other == com.google.cloud.managedkafka.v1.CreateConnectorRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getConnectorId().isEmpty()) { + connectorId_ = other.connectorId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasConnector()) { + mergeConnector(other.getConnector()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + connectorId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(getConnectorFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + /** + * + * + *
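
The `case 10`, `case 18`, and `case 26` constants in the parse loop above are protobuf wire tags: `tag = (field_number << 3) | wire_type`, where wire type 2 marks length-delimited values (strings and nested messages alike):

```java
public class WireTagSketch {
  // Wire type 2 = length-delimited (strings, bytes, embedded messages).
  static int tag(int fieldNumber, int wireType) {
    return (fieldNumber << 3) | wireType;
  }

  public static void main(String[] args) {
    System.out.println(tag(1, 2)); // 10 -> parent
    System.out.println(tag(2, 2)); // 18 -> connector_id
    System.out.println(tag(3, 2)); // 26 -> connector
  }
}
```
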
+     * Required. The parent Connect cluster in which to create the connector.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The parent Connect cluster in which to create the connector.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The parent Connect cluster in which to create the connector.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent Connect cluster in which to create the connector.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent Connect cluster in which to create the connector.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object connectorId_ = ""; + /** + * + * + *
+     * Required. The ID to use for the connector, which will become the final
+     * component of the connector's name. The ID must be 1-63 characters long, and
+     * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
+     * RFC 1035.
+     *
+     * This value is structured like: `my-connector-id`.
+     * 
+ * + * string connector_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The connectorId. + */ + public java.lang.String getConnectorId() { + java.lang.Object ref = connectorId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + connectorId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The ID to use for the connector, which will become the final
+     * component of the connector's name. The ID must be 1-63 characters long, and
+     * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
+     * RFC 1035.
+     *
+     * This value is structured like: `my-connector-id`.
+     * 
+ * + * string connector_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for connectorId. + */ + public com.google.protobuf.ByteString getConnectorIdBytes() { + java.lang.Object ref = connectorId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + connectorId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The ID to use for the connector, which will become the final
+     * component of the connector's name. The ID must be 1-63 characters long, and
+     * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
+     * RFC 1035.
+     *
+     * This value is structured like: `my-connector-id`.
+     * 
+ * + * string connector_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The connectorId to set. + * @return This builder for chaining. + */ + public Builder setConnectorId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + connectorId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The ID to use for the connector, which will become the final
+     * component of the connector's name. The ID must be 1-63 characters long, and
+     * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
+     * RFC 1035.
+     *
+     * This value is structured like: `my-connector-id`.
+     * 
+ * + * string connector_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearConnectorId() { + connectorId_ = getDefaultInstance().getConnectorId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The ID to use for the connector, which will become the final
+     * component of the connector's name. The ID must be 1-63 characters long, and
+     * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
+     * RFC 1035.
+     *
+     * This value is structured like: `my-connector-id`.
+     * 
+ * + * string connector_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for connectorId to set. + * @return This builder for chaining. + */ + public Builder setConnectorIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + connectorId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.cloud.managedkafka.v1.Connector connector_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.Connector, + com.google.cloud.managedkafka.v1.Connector.Builder, + com.google.cloud.managedkafka.v1.ConnectorOrBuilder> + connectorBuilder_; + /** + * + * + *
+     * Required. The connector to create.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the connector field is set. + */ + public boolean hasConnector() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
+     * Required. The connector to create.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The connector. + */ + public com.google.cloud.managedkafka.v1.Connector getConnector() { + if (connectorBuilder_ == null) { + return connector_ == null + ? com.google.cloud.managedkafka.v1.Connector.getDefaultInstance() + : connector_; + } else { + return connectorBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Required. The connector to create.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setConnector(com.google.cloud.managedkafka.v1.Connector value) { + if (connectorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + connector_ = value; + } else { + connectorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The connector to create.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setConnector( + com.google.cloud.managedkafka.v1.Connector.Builder builderForValue) { + if (connectorBuilder_ == null) { + connector_ = builderForValue.build(); + } else { + connectorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The connector to create.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeConnector(com.google.cloud.managedkafka.v1.Connector value) { + if (connectorBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && connector_ != null + && connector_ != com.google.cloud.managedkafka.v1.Connector.getDefaultInstance()) { + getConnectorBuilder().mergeFrom(value); + } else { + connector_ = value; + } + } else { + connectorBuilder_.mergeFrom(value); + } + if (connector_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + /** + * + * + *
+     * Required. The connector to create.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearConnector() { + bitField0_ = (bitField0_ & ~0x00000004); + connector_ = null; + if (connectorBuilder_ != null) { + connectorBuilder_.dispose(); + connectorBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The connector to create.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.Connector.Builder getConnectorBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getConnectorFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Required. The connector to create.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.managedkafka.v1.ConnectorOrBuilder getConnectorOrBuilder() { + if (connectorBuilder_ != null) { + return connectorBuilder_.getMessageOrBuilder(); + } else { + return connector_ == null + ? com.google.cloud.managedkafka.v1.Connector.getDefaultInstance() + : connector_; + } + } + /** + * + * + *
+     * Required. The connector to create.
+     * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.Connector, + com.google.cloud.managedkafka.v1.Connector.Builder, + com.google.cloud.managedkafka.v1.ConnectorOrBuilder> + getConnectorFieldBuilder() { + if (connectorBuilder_ == null) { + connectorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.Connector, + com.google.cloud.managedkafka.v1.Connector.Builder, + com.google.cloud.managedkafka.v1.ConnectorOrBuilder>( + getConnector(), getParentForChildren(), isClean()); + connector_ = null; + } + return connectorBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.CreateConnectorRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.CreateConnectorRequest) + private static final com.google.cloud.managedkafka.v1.CreateConnectorRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.CreateConnectorRequest(); + } + + public static com.google.cloud.managedkafka.v1.CreateConnectorRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateConnectorRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.CreateConnectorRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectorRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectorRequestOrBuilder.java new file mode 100644 index 000000000000..7e741839da5a --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/CreateConnectorRequestOrBuilder.java @@ -0,0 +1,135 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface CreateConnectorRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.CreateConnectorRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The parent Connect cluster in which to create the connector.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+   * Required. The parent Connect cluster in which to create the connector.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. The ID to use for the connector, which will become the final
+   * component of the connector's name. The ID must be 1-63 characters long, and
+   * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
+   * RFC 1035.
+   *
+   * This value is structured like: `my-connector-id`.
+   * 
+ * + * string connector_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The connectorId. + */ + java.lang.String getConnectorId(); + /** + * + * + *
+   * Required. The ID to use for the connector, which will become the final
+   * component of the connector's name. The ID must be 1-63 characters long, and
+   * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
+   * RFC 1035.
+   *
+   * This value is structured like: `my-connector-id`.
+   * 
+ * + * string connector_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for connectorId. + */ + com.google.protobuf.ByteString getConnectorIdBytes(); + + /** + * + * + *
+   * Required. The connector to create.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the connector field is set. + */ + boolean hasConnector(); + /** + * + * + *
+   * Required. The connector to create.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The connector. + */ + com.google.cloud.managedkafka.v1.Connector getConnector(); + /** + * + * + *
+   * Required. The connector to create.
+   * 
+ * + * + * .google.cloud.managedkafka.v1.Connector connector = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.managedkafka.v1.ConnectorOrBuilder getConnectorOrBuilder(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectClusterRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectClusterRequest.java new file mode 100644 index 000000000000..209d3d7d52ba --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectClusterRequest.java @@ -0,0 +1,945 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
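
Both the message class and its `Builder` implement the `CreateConnectorRequestOrBuilder` interface defined above, which is what lets read-only helpers accept either one:

```java
import com.google.cloud.managedkafka.v1.CreateConnectorRequest;
import com.google.cloud.managedkafka.v1.CreateConnectorRequestOrBuilder;

public class OrBuilderSketch {
  // Accepts either a built message or a builder still under construction.
  static String describe(CreateConnectorRequestOrBuilder req) {
    return req.getParent() + "/connectors/" + req.getConnectorId();
  }

  public static void main(String[] args) {
    CreateConnectorRequest.Builder builder =
        CreateConnectorRequest.newBuilder()
            .setParent("projects/p/locations/l/connectClusters/c")
            .setConnectorId("my-connector-id");

    System.out.println(describe(builder));         // works on the builder
    System.out.println(describe(builder.build())); // and on the message
  }
}
```
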
+ * Request for DeleteConnectCluster.
+ * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.DeleteConnectClusterRequest} + */ +public final class DeleteConnectClusterRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.DeleteConnectClusterRequest) + DeleteConnectClusterRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use DeleteConnectClusterRequest.newBuilder() to construct. + private DeleteConnectClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private DeleteConnectClusterRequest() { + name_ = ""; + requestId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new DeleteConnectClusterRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest.class, + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
+   * Required. The name of the Kafka Connect cluster to delete.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The name of the Kafka Connect cluster to delete.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + /** + * + * + *
+   * Optional. A request ID to identify requests. Specify a unique request ID
+   * to avoid duplication of requests. If a request times out or fails,
+   * retrying with the same ID allows the server to recognize the previous
+   * attempt. For at least 60 minutes, the server ignores duplicate requests
+   * bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and
+   * the request times out. If you retry with the same request ID within 60
+   * minutes of the last request, the server checks whether an operation with
+   * the same request ID was already received. If so, the server ignores the
+   * second request.
+   *
+   * The request ID must be a valid UUID. The zero UUID
+   * (00000000-0000-0000-0000-000000000000) is not supported.
+   * 
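+   *
+   * <p>A sketch of the retry pattern this enables (illustrative only; {@code
+   * connectClusterName} is an assumed variable). Reusing one UUID across the
+   * original call and its retry is what lets the server deduplicate:
+   *
+   * <pre>{@code
+   * String requestId = java.util.UUID.randomUUID().toString();
+   * DeleteConnectClusterRequest request =
+   *     DeleteConnectClusterRequest.newBuilder()
+   *         .setName(connectClusterName)
+   *         .setRequestId(requestId) // send the same ID again on retry
+   *         .build();
+   * }</pre>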
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. A request ID to identify requests. Specify a unique request ID
+   * to avoid duplication of requests. If a request times out or fails,
+   * retrying with the same ID allows the server to recognize the previous
+   * attempt. For at least 60 minutes, the server ignores duplicate requests
+   * bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and
+   * the request times out. If you retry with the same request ID within 60
+   * minutes of the last request, the server checks whether an operation with
+   * the same request ID was already received. If so, the server ignores the
+   * second request.
+   *
+   * The request ID must be a valid UUID. The zero UUID
+   * (00000000-0000-0000-0000-000000000000) is not supported.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest other = + (com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request for DeleteConnectCluster.
+   * 
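+   *
+   * <p>A sketch of the builder round trip, assuming an existing message
+   * {@code request}; {@code toBuilder()} copies all set fields, so only the
+   * changed field needs to be set again:
+   *
+   * <pre>{@code
+   * DeleteConnectClusterRequest modified =
+   *     request.toBuilder()
+   *         .setRequestId(java.util.UUID.randomUUID().toString())
+   *         .build();
+   * }</pre>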
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.DeleteConnectClusterRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.DeleteConnectClusterRequest) + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest.class, + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest + getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest build() { + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest buildPartial() { + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest result = + new com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.requestId_ = requestId_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor 
field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest other) { + if (other + == com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + /** + * + * + *
+     * Optional. A request ID to identify requests. Specify a unique request ID
+     * to avoid duplication of requests. If a request times out or fails,
+     * retrying with the same ID allows the server to recognize the previous
+     * attempt. For at least 60 minutes, the server ignores duplicate requests
+     * bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and
+     * the request times out. If you retry with the same request ID within 60
+     * minutes of the last request, the server checks whether an operation with
+     * the same request ID was already received. If so, the server ignores the
+     * second request.
+     *
+     * The request ID must be a valid UUID. The zero UUID
+     * (00000000-0000-0000-0000-000000000000) is not supported.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. A request ID to identify requests. Specify a unique request ID
+     * to avoid duplication of requests. If a request times out or fails,
+     * retrying with the same ID allows the server to recognize the previous
+     * attempt. For at least 60 minutes, the server ignores duplicate requests
+     * bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and
+     * the request times out. If you retry with the same request ID within 60
+     * minutes of the last request, the server checks whether an operation with
+     * the same request ID was already received. If so, the server ignores the
+     * second request.
+     *
+     * The request ID must be a valid UUID. The zero UUID
+     * (00000000-0000-0000-0000-000000000000) is not supported.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. A request ID to identify requests. Specify a unique request ID
+     * to avoid duplication of requests. If a request times out or fails,
+     * retrying with the same ID allows the server to recognize the previous
+     * attempt. For at least 60 minutes, the server ignores duplicate requests
+     * bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and
+     * the request times out. If you retry with the same request ID within 60
+     * minutes of the last request, the server checks whether an operation with
+     * the same request ID was already received. If so, the server ignores the
+     * second request.
+     *
+     * The request ID must be a valid UUID. The zero UUID
+     * (00000000-0000-0000-0000-000000000000) is not supported.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A request ID to identify requests. Specify a unique request ID
+     * to avoid duplication of requests. If a request times out or fails,
+     * retrying with the same ID allows the server to recognize the previous
+     * attempt. For at least 60 minutes, the server ignores duplicate requests
+     * bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and
+     * the request times out. If you retry with the same request ID within 60
+     * minutes of the last request, the server checks whether an operation with
+     * the same request ID was already received. If so, the server ignores the
+     * second request.
+     *
+     * The request ID must be a valid UUID. The zero UUID
+     * (00000000-0000-0000-0000-000000000000) is not supported.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A request ID to identify requests. Specify a unique request ID
+     * to avoid duplication of requests. If a request times out or fails,
+     * retrying with the same ID allows the server to recognize the previous
+     * attempt. For at least 60 minutes, the server ignores duplicate requests
+     * bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and
+     * the request times out. If you retry with the same request ID within 60
+     * minutes of the last request, the server checks whether an operation with
+     * the same request ID was already received. If so, the server ignores the
+     * second request.
+     *
+     * The request ID must be a valid UUID. The zero UUID
+     * (00000000-0000-0000-0000-000000000000) is not supported.
+     * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.DeleteConnectClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.DeleteConnectClusterRequest) + private static final com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest(); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteConnectClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectClusterRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectClusterRequestOrBuilder.java new file mode 100644 index 000000000000..f96de9ed289f --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectClusterRequestOrBuilder.java @@ -0,0 +1,114 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface DeleteConnectClusterRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.DeleteConnectClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the Kafka Connect cluster to delete.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
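+   *
+   * <p>A sketch of what the {@code OrBuilder} view buys: one method can read
+   * fields from a built message and a live builder alike (illustrative only):
+   *
+   * <pre>{@code
+   * static String targetOf(DeleteConnectClusterRequestOrBuilder reqOrBuilder) {
+   *   return reqOrBuilder.getName();
+   * }
+   * }</pre>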
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Required. The name of the Kafka Connect cluster to delete.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Optional. A request ID to identify requests. Specify a unique request ID
+   * to avoid duplication of requests. If a request times out or fails,
+   * retrying with the same ID allows the server to recognize the previous
+   * attempt. For at least 60 minutes, the server ignores duplicate requests
+   * bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and
+   * the request times out. If you retry with the same request ID within 60
+   * minutes of the last request, the server checks whether an operation with
+   * the same request ID was already received. If so, the server ignores the
+   * second request.
+   *
+   * The request ID must be a valid UUID. The zero UUID
+   * (00000000-0000-0000-0000-000000000000) is not supported.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + /** + * + * + *
+   * Optional. A request ID to identify requests. Specify a unique request ID
+   * to avoid duplication of requests. If a request times out or fails,
+   * retrying with the same ID allows the server to recognize the previous
+   * attempt. For at least 60 minutes, the server ignores duplicate requests
+   * bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and
+   * the request times out. If you retry with the same request ID within 60
+   * minutes of the last request, the server checks whether an operation with
+   * the same request ID was already received. If so, the server ignores the
+   * second request.
+   *
+   * The request ID must be a valid UUID. The zero UUID
+   * (00000000-0000-0000-0000-000000000000) is not supported.
+   * 
+ * + * + * string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectorRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectorRequest.java new file mode 100644 index 000000000000..f32d4da6b259 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectorRequest.java @@ -0,0 +1,654 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * Request for DeleteConnector.
+ * 
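+ *
+ * <p>A minimal usage sketch, assuming a hypothetical resource name:
+ *
+ * <pre>{@code
+ * DeleteConnectorRequest request =
+ *     DeleteConnectorRequest.newBuilder()
+ *         .setName(
+ *             "projects/my-project/locations/us-central1/connectClusters"
+ *                 + "/my-connect-cluster/connectors/my-connector")
+ *         .build();
+ * }</pre>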
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.DeleteConnectorRequest} + */ +public final class DeleteConnectorRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.DeleteConnectorRequest) + DeleteConnectorRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use DeleteConnectorRequest.newBuilder() to construct. + private DeleteConnectorRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private DeleteConnectorRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new DeleteConnectorRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.DeleteConnectorRequest.class, + com.google.cloud.managedkafka.v1.DeleteConnectorRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
+   * Required. The name of the connector to delete.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The name of the connector to delete.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.DeleteConnectorRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.DeleteConnectorRequest other = + (com.google.cloud.managedkafka.v1.DeleteConnectorRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.DeleteConnectorRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request for DeleteConnector.
+   * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.DeleteConnectorRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.DeleteConnectorRequest) + com.google.cloud.managedkafka.v1.DeleteConnectorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.DeleteConnectorRequest.class, + com.google.cloud.managedkafka.v1.DeleteConnectorRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.DeleteConnectorRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.DeleteConnectorRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.DeleteConnectorRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.DeleteConnectorRequest build() { + com.google.cloud.managedkafka.v1.DeleteConnectorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.DeleteConnectorRequest buildPartial() { + com.google.cloud.managedkafka.v1.DeleteConnectorRequest result = + new com.google.cloud.managedkafka.v1.DeleteConnectorRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.DeleteConnectorRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.DeleteConnectorRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.DeleteConnectorRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.DeleteConnectorRequest other) { + if (other == com.google.cloud.managedkafka.v1.DeleteConnectorRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. The name of the connector to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The name of the connector to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The name of the connector to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the connector to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the connector to delete.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.DeleteConnectorRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.DeleteConnectorRequest) + private static final com.google.cloud.managedkafka.v1.DeleteConnectorRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.DeleteConnectorRequest(); + } + + public static com.google.cloud.managedkafka.v1.DeleteConnectorRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteConnectorRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.DeleteConnectorRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectorRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectorRequestOrBuilder.java new file mode 100644 index 000000000000..949338ccfe8c --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/DeleteConnectorRequestOrBuilder.java @@ -0,0 +1,59 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface DeleteConnectorRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.DeleteConnectorRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the connector to delete.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Required. The name of the connector to delete.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectClusterRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectClusterRequest.java new file mode 100644 index 000000000000..5900751aa57f --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectClusterRequest.java @@ -0,0 +1,654 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * Request for GetConnectCluster.
+ * 
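+ *
+ * <p>A minimal usage sketch, assuming a hypothetical resource name:
+ *
+ * <pre>{@code
+ * GetConnectClusterRequest request =
+ *     GetConnectClusterRequest.newBuilder()
+ *         .setName(
+ *             "projects/my-project/locations/us-central1/connectClusters/my-connect-cluster")
+ *         .build();
+ * }</pre>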
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.GetConnectClusterRequest} + */ +public final class GetConnectClusterRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.GetConnectClusterRequest) + GetConnectClusterRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use GetConnectClusterRequest.newBuilder() to construct. + private GetConnectClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private GetConnectClusterRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GetConnectClusterRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.GetConnectClusterRequest.class, + com.google.cloud.managedkafka.v1.GetConnectClusterRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
+   * Required. The name of the Kafka Connect cluster whose configuration to
+   * return. Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The name of the Kafka Connect cluster whose configuration to
+   * return. Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.GetConnectClusterRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.GetConnectClusterRequest other = + (com.google.cloud.managedkafka.v1.GetConnectClusterRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.GetConnectClusterRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request for GetConnectCluster.
+   * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.GetConnectClusterRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.GetConnectClusterRequest) + com.google.cloud.managedkafka.v1.GetConnectClusterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.GetConnectClusterRequest.class, + com.google.cloud.managedkafka.v1.GetConnectClusterRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.GetConnectClusterRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.GetConnectClusterRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.GetConnectClusterRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.GetConnectClusterRequest build() { + com.google.cloud.managedkafka.v1.GetConnectClusterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.GetConnectClusterRequest buildPartial() { + com.google.cloud.managedkafka.v1.GetConnectClusterRequest result = + new com.google.cloud.managedkafka.v1.GetConnectClusterRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.GetConnectClusterRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( 
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.GetConnectClusterRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.GetConnectClusterRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.GetConnectClusterRequest other) { + if (other == com.google.cloud.managedkafka.v1.GetConnectClusterRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster whose configuration to
+     * return. Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster whose configuration to
+     * return. Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster whose configuration to
+     * return. Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster whose configuration to
+     * return. Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the Kafka Connect cluster whose configuration to
+     * return. Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.GetConnectClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.GetConnectClusterRequest) + private static final com.google.cloud.managedkafka.v1.GetConnectClusterRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.GetConnectClusterRequest(); + } + + public static com.google.cloud.managedkafka.v1.GetConnectClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetConnectClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.GetConnectClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectClusterRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectClusterRequestOrBuilder.java new file mode 100644 index 000000000000..00da0439d91a --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectClusterRequestOrBuilder.java @@ -0,0 +1,59 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface GetConnectClusterRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.GetConnectClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the Kafka Connect cluster whose configuration to
+   * return. Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Required. The name of the Kafka Connect cluster whose configuration to
+   * return. Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectorRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectorRequest.java new file mode 100644 index 000000000000..fc1f92f03ddf --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectorRequest.java @@ -0,0 +1,653 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * Request for GetConnector.
+ * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.GetConnectorRequest} + */ +public final class GetConnectorRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.GetConnectorRequest) + GetConnectorRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use GetConnectorRequest.newBuilder() to construct. + private GetConnectorRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private GetConnectorRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GetConnectorRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.GetConnectorRequest.class, + com.google.cloud.managedkafka.v1.GetConnectorRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
+   * Required. The name of the connector whose configuration to return.
+   * Structured like:
+   * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The name of the connector whose configuration to return.
+   * Structured like:
+   * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.GetConnectorRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.GetConnectorRequest other = + (com.google.cloud.managedkafka.v1.GetConnectorRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.managedkafka.v1.GetConnectorRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request for GetConnector.
+   * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.GetConnectorRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.GetConnectorRequest) + com.google.cloud.managedkafka.v1.GetConnectorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.GetConnectorRequest.class, + com.google.cloud.managedkafka.v1.GetConnectorRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.GetConnectorRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.GetConnectorRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.GetConnectorRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.GetConnectorRequest build() { + com.google.cloud.managedkafka.v1.GetConnectorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.GetConnectorRequest buildPartial() { + com.google.cloud.managedkafka.v1.GetConnectorRequest result = + new com.google.cloud.managedkafka.v1.GetConnectorRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.GetConnectorRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + 
return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.GetConnectorRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.GetConnectorRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.GetConnectorRequest other) { + if (other == com.google.cloud.managedkafka.v1.GetConnectorRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. The name of the connector whose configuration to return.
+     * Structured like:
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The name of the connector whose configuration to return.
+     * Structured like:
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The name of the connector whose configuration to return.
+     * Structured like:
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the connector whose configuration to return.
+     * Structured like:
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the connector whose configuration to return.
+     * Structured like:
+     * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.GetConnectorRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.GetConnectorRequest) + private static final com.google.cloud.managedkafka.v1.GetConnectorRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.GetConnectorRequest(); + } + + public static com.google.cloud.managedkafka.v1.GetConnectorRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetConnectorRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.GetConnectorRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectorRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectorRequestOrBuilder.java new file mode 100644 index 000000000000..466ef77368fc --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/GetConnectorRequestOrBuilder.java @@ -0,0 +1,59 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface GetConnectorRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.GetConnectorRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the connector whose configuration to return.
+   * Structured like:
+   * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Required. The name of the connector whose configuration to return.
+   * Structured like:
+   * `projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}`.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersRequest.java new file mode 100644 index 000000000000..d5c8b76116f3 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersRequest.java @@ -0,0 +1,1322 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * Request for ListConnectClusters.
+ * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ListConnectClustersRequest} + */ +public final class ListConnectClustersRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ListConnectClustersRequest) + ListConnectClustersRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListConnectClustersRequest.newBuilder() to construct. + private ListConnectClustersRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListConnectClustersRequest() { + parent_ = ""; + pageToken_ = ""; + filter_ = ""; + orderBy_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListConnectClustersRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ListConnectClustersRequest.class, + com.google.cloud.managedkafka.v1.ListConnectClustersRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + /** + * + * + *
+   * Required. The parent project/location whose Connect clusters are to be
+   * listed. Structured like `projects/{project}/locations/{location}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The parent project/location whose Connect clusters are to be
+   * listed. Structured like `projects/{project}/locations/{location}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + /** + * + * + *
+   * Optional. The maximum number of Connect clusters to return. The service may
+   * return fewer than this value. If unspecified, the server will pick an
+   * appropriate default.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + /** + * + * + *
+   * Optional. A page token, received from a previous `ListConnectClusters`
+   * call. Provide this to retrieve the subsequent page.
+   *
+   * When paginating, all other parameters provided to `ListConnectClusters`
+   * must match the call that provided the page token.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. A page token, received from a previous `ListConnectClusters`
+   * call. Provide this to retrieve the subsequent page.
+   *
+   * When paginating, all other parameters provided to `ListConnectClusters`
+   * must match the call that provided the page token.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + /** + * + * + *
+   * Optional. Filter expression for the results.
+   * 
+ * + * string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. Filter expression for the results.
+   * 
+ * + * string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ORDER_BY_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object orderBy_ = ""; + /** + * + * + *
+   * Optional. Order by fields for the results.
+   * 
+ * + * string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The orderBy. + */ + @java.lang.Override + public java.lang.String getOrderBy() { + java.lang.Object ref = orderBy_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderBy_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. Order by fields for the results.
+   * 
+ * + * string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for orderBy. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOrderByBytes() { + java.lang.Object ref = orderBy_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + orderBy_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, filter_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderBy_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, orderBy_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, filter_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderBy_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, orderBy_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.ListConnectClustersRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.ListConnectClustersRequest other = + (com.google.cloud.managedkafka.v1.ListConnectClustersRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (!getOrderBy().equals(other.getOrderBy())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + 
PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + ORDER_BY_FIELD_NUMBER; + hash = (53 * hash) + getOrderBy().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest 
parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.ListConnectClustersRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request for ListConnectClusters.
+   * 
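+   *
+   * A hypothetical usage sketch (relies only on the builder methods generated
+   * in this file; the parent value is illustrative):
+   * <pre>{@code
+   * ListConnectClustersRequest request =
+   *     ListConnectClustersRequest.newBuilder()
+   *         .setParent("projects/my-project/locations/us-central1")
+   *         .setPageSize(10)
+   *         .build();
+   * }</pre>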
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ListConnectClustersRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ListConnectClustersRequest) + com.google.cloud.managedkafka.v1.ListConnectClustersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ListConnectClustersRequest.class, + com.google.cloud.managedkafka.v1.ListConnectClustersRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.ListConnectClustersRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + filter_ = ""; + orderBy_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectClustersRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.ListConnectClustersRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectClustersRequest build() { + com.google.cloud.managedkafka.v1.ListConnectClustersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectClustersRequest buildPartial() { + com.google.cloud.managedkafka.v1.ListConnectClustersRequest result = + new com.google.cloud.managedkafka.v1.ListConnectClustersRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.ListConnectClustersRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.orderBy_ = orderBy_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return 
super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.ListConnectClustersRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.ListConnectClustersRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.ListConnectClustersRequest other) { + if (other == com.google.cloud.managedkafka.v1.ListConnectClustersRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.getOrderBy().isEmpty()) { + orderBy_ = other.orderBy_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + orderBy_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + /** + * + * + *
+     * Required. The parent project/location whose Connect clusters are to be
+     * listed. Structured like `projects/{project}/locations/{location}`.
+     * 
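+     *
+     * A hypothetical sketch, assuming the generated LocationName resource
+     * helper from this package:
+     * <pre>{@code
+     * String parent = LocationName.of("my-project", "us-central1").toString();
+     * }</pre>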
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The parent project/location whose Connect clusters are to be
+     * listed. Structured like `projects/{project}/locations/{location}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The parent project/location whose Connect clusters are to be
+     * listed. Structured like `projects/{project}/locations/{location}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent project/location whose Connect clusters are to be
+     * listed. Structured like `projects/{project}/locations/{location}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent project/location whose Connect clusters are to be
+     * listed. Structured like `projects/{project}/locations/{location}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + /** + * + * + *
+     * Optional. The maximum number of Connect clusters to return. The service may
+     * return fewer than this value. If unspecified, the server will pick an
+     * appropriate default.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + /** + * + * + *
+     * Optional. The maximum number of Connect clusters to return. The service may
+     * return fewer than this value. If unspecified, the server will pick an
+     * appropriate default.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The maximum number of Connect clusters to return. The service may
+     * return fewer than this value. If unspecified, the server will pick an
+     * appropriate default.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectClusters`
+     * call. Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectClusters`
+     * must match the call that provided the page token.
+     * 
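+     *
+     * A hedged pagination sketch (assumes the typical GAPIC paged surface on
+     * ManagedKafkaConnectClient; everything outside this file is illustrative):
+     * <pre>{@code
+     * try (ManagedKafkaConnectClient client = ManagedKafkaConnectClient.create()) {
+     *   ListConnectClustersRequest request =
+     *       ListConnectClustersRequest.newBuilder()
+     *           .setParent("projects/my-project/locations/us-central1")
+     *           .build();
+     *   // iterateAll() transparently follows page_token across pages.
+     *   for (ConnectCluster cluster : client.listConnectClusters(request).iterateAll()) {
+     *     System.out.println(cluster.getName());
+     *   }
+     * }
+     * }</pre>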
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectClusters`
+     * call. Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectClusters`
+     * must match the call that provided the page token.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectClusters`
+     * call. Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectClusters`
+     * must match the call that provided the page token.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectClusters`
+     * call. Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectClusters`
+     * must match the call that provided the page token.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectClusters`
+     * call. Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectClusters`
+     * must match the call that provided the page token.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + /** + * + * + *
+     * Optional. Filter expression for the result.
+     * 
+ * + * string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. Filter expression for the result.
+     * 
+ * + * string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. Filter expression for the result.
+     * 
+ * + * string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Filter expression for the result.
+     * 
+ * + * string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Filter expression for the result.
+     * 
+ * + * string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object orderBy_ = ""; + /** + * + * + *
+     * Optional. Order by fields for the result.
+     * 
+ * + * string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The orderBy. + */ + public java.lang.String getOrderBy() { + java.lang.Object ref = orderBy_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderBy_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. Order by fields for the result.
+     * 
+ * + * string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for orderBy. + */ + public com.google.protobuf.ByteString getOrderByBytes() { + java.lang.Object ref = orderBy_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + orderBy_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. Order by fields for the result.
+     * 
+ * + * string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The orderBy to set. + * @return This builder for chaining. + */ + public Builder setOrderBy(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + orderBy_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Order by fields for the result.
+     * 
+ * + * string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearOrderBy() { + orderBy_ = getDefaultInstance().getOrderBy(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Order by fields for the result.
+     * 
+ * + * string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for orderBy to set. + * @return This builder for chaining. + */ + public Builder setOrderByBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + orderBy_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ListConnectClustersRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ListConnectClustersRequest) + private static final com.google.cloud.managedkafka.v1.ListConnectClustersRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ListConnectClustersRequest(); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListConnectClustersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectClustersRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersRequestOrBuilder.java new file mode 100644 index 000000000000..1e07b9fda3a7 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersRequestOrBuilder.java @@ -0,0 +1,155 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface ListConnectClustersRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ListConnectClustersRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The parent project/location whose Connect clusters are to be
+   * listed. Structured like `projects/{project}/locations/{location}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+   * Required. The parent project/location whose Connect clusters are to be
+   * listed. Structured like `projects/{project}/locations/{location}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. The maximum number of Connect clusters to return. The service may
+   * return fewer than this value. If unspecified, the server will pick an
+   * appropriate default.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. A page token, received from a previous `ListConnectClusters`
+   * call. Provide this to retrieve the subsequent page.
+   *
+   * When paginating, all other parameters provided to `ListConnectClusters`
+   * must match the call that provided the page token.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + /** + * + * + *
+   * Optional. A page token, received from a previous `ListConnectClusters`
+   * call. Provide this to retrieve the subsequent page.
+   *
+   * When paginating, all other parameters provided to `ListConnectClusters`
+   * must match the call that provided the page token.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
+   * Optional. Filter expression for the result.
+   * 
+ * + * string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + java.lang.String getFilter(); + /** + * + * + *
+   * Optional. Filter expression for the result.
+   * 
+ * + * string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
+   * Optional. Order by fields for the result.
+   * 
+ * + * string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The orderBy. + */ + java.lang.String getOrderBy(); + /** + * + * + *
+   * Optional. Order by fields for the result.
+   * 
+ * + * string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for orderBy. + */ + com.google.protobuf.ByteString getOrderByBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersResponse.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersResponse.java new file mode 100644 index 000000000000..f9409caf8cd6 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersResponse.java @@ -0,0 +1,1421 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * Response for ListConnectClusters.
+ * 
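+ *
+ * A minimal read sketch (uses only accessors defined below; the response value
+ * is assumed to come from a ListConnectClusters call):
+ * <pre>{@code
+ * for (ConnectCluster cluster : response.getConnectClustersList()) {
+ *   System.out.println(cluster.getName());
+ * }
+ * }</pre>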
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ListConnectClustersResponse} + */ +public final class ListConnectClustersResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ListConnectClustersResponse) + ListConnectClustersResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListConnectClustersResponse.newBuilder() to construct. + private ListConnectClustersResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListConnectClustersResponse() { + connectClusters_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListConnectClustersResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ListConnectClustersResponse.class, + com.google.cloud.managedkafka.v1.ListConnectClustersResponse.Builder.class); + } + + public static final int CONNECT_CLUSTERS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List connectClusters_; + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + @java.lang.Override + public java.util.List getConnectClustersList() { + return connectClusters_; + } + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + @java.lang.Override + public java.util.List + getConnectClustersOrBuilderList() { + return connectClusters_; + } + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + @java.lang.Override + public int getConnectClustersCount() { + return connectClusters_.size(); + } + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectCluster getConnectClusters(int index) { + return connectClusters_.get(index); + } + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder getConnectClustersOrBuilder( + int index) { + return connectClusters_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + /** + * + * + *
+   * A token that can be sent as `page_token` to retrieve the next page of
+   * results. If this field is omitted, there are no more results.
+   * 
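+   *
+   * A manual-paging sketch (illustrative; the generated paged helpers in the
+   * client are usually preferable):
+   * <pre>{@code
+   * String token = response.getNextPageToken();
+   * if (!token.isEmpty()) {
+   *   request = request.toBuilder().setPageToken(token).build();
+   * }
+   * }</pre>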
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + /** + * + * + *
+   * A token that can be sent as `page_token` to retrieve the next page of
+   * results. If this field is omitted, there are no more results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UNREACHABLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + /** + * + * + *
+   * Locations that could not be reached.
+   * 
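+   *
+   * For example (illustrative only):
+   * <pre>{@code
+   * for (String location : response.getUnreachableList()) {
+   *   System.err.println("Skipped unreachable location: " + location);
+   * }
+   * }</pre>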
+ * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + return unreachable_; + } + /** + * + * + *
+   * Locations that could not be reached.
+   * 
+ * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + /** + * + * + *
+   * Locations that could not be reached.
+   * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + /** + * + * + *
+   * Locations that could not be reached.
+   * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < connectClusters_.size(); i++) { + output.writeMessage(1, connectClusters_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, nextPageToken_); + } + for (int i = 0; i < unreachable_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, unreachable_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < connectClusters_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, connectClusters_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, nextPageToken_); + } + { + int dataSize = 0; + for (int i = 0; i < unreachable_.size(); i++) { + dataSize += computeStringSizeNoTag(unreachable_.getRaw(i)); + } + size += dataSize; + size += 1 * getUnreachableList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.ListConnectClustersResponse)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.ListConnectClustersResponse other = + (com.google.cloud.managedkafka.v1.ListConnectClustersResponse) obj; + + if (!getConnectClustersList().equals(other.getConnectClustersList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnreachableList().equals(other.getUnreachableList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getConnectClustersCount() > 0) { + hash = (37 * hash) + CONNECT_CLUSTERS_FIELD_NUMBER; + hash = (53 * hash) + getConnectClustersList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (getUnreachableCount() > 0) { + hash = (37 * hash) + UNREACHABLE_FIELD_NUMBER; + hash = (53 * hash) + getUnreachableList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.ListConnectClustersResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Response for ListConnectClusters.
+   * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ListConnectClustersResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ListConnectClustersResponse) + com.google.cloud.managedkafka.v1.ListConnectClustersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ListConnectClustersResponse.class, + com.google.cloud.managedkafka.v1.ListConnectClustersResponse.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.ListConnectClustersResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (connectClustersBuilder_ == null) { + connectClusters_ = java.util.Collections.emptyList(); + } else { + connectClusters_ = null; + connectClustersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectClustersResponse + getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.ListConnectClustersResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectClustersResponse build() { + com.google.cloud.managedkafka.v1.ListConnectClustersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectClustersResponse buildPartial() { + com.google.cloud.managedkafka.v1.ListConnectClustersResponse result = + new com.google.cloud.managedkafka.v1.ListConnectClustersResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.managedkafka.v1.ListConnectClustersResponse result) { + if (connectClustersBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + connectClusters_ = java.util.Collections.unmodifiableList(connectClusters_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.connectClusters_ = connectClusters_; + } else { + result.connectClusters_ = connectClustersBuilder_.build(); + } + } + + private void buildPartial0( + com.google.cloud.managedkafka.v1.ListConnectClustersResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + 
result.nextPageToken_ = nextPageToken_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + unreachable_.makeImmutable(); + result.unreachable_ = unreachable_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.ListConnectClustersResponse) { + return mergeFrom((com.google.cloud.managedkafka.v1.ListConnectClustersResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.ListConnectClustersResponse other) { + if (other + == com.google.cloud.managedkafka.v1.ListConnectClustersResponse.getDefaultInstance()) + return this; + if (connectClustersBuilder_ == null) { + if (!other.connectClusters_.isEmpty()) { + if (connectClusters_.isEmpty()) { + connectClusters_ = other.connectClusters_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureConnectClustersIsMutable(); + connectClusters_.addAll(other.connectClusters_); + } + onChanged(); + } + } else { + if (!other.connectClusters_.isEmpty()) { + if (connectClustersBuilder_.isEmpty()) { + connectClustersBuilder_.dispose(); + connectClustersBuilder_ = null; + connectClusters_ = other.connectClusters_; + bitField0_ = (bitField0_ & ~0x00000001); + connectClustersBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getConnectClustersFieldBuilder() + : null; + } else { + connectClustersBuilder_.addAllMessages(other.connectClusters_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.unreachable_.isEmpty()) { + if (unreachable_.isEmpty()) { + unreachable_ = other.unreachable_; + bitField0_ |= 0x00000004; + } else { + ensureUnreachableIsMutable(); + unreachable_.addAll(other.unreachable_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.managedkafka.v1.ConnectCluster m = + input.readMessage( + com.google.cloud.managedkafka.v1.ConnectCluster.parser(), + extensionRegistry); + if (connectClustersBuilder_ == null) { + ensureConnectClustersIsMutable(); + connectClusters_.add(m); + } else { + connectClustersBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureUnreachableIsMutable(); + unreachable_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List connectClusters_ = + java.util.Collections.emptyList(); + + private void ensureConnectClustersIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + connectClusters_ = + new java.util.ArrayList( + connectClusters_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectCluster, + com.google.cloud.managedkafka.v1.ConnectCluster.Builder, + com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder> + connectClustersBuilder_; + + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public java.util.List + getConnectClustersList() { + if (connectClustersBuilder_ == null) { + return java.util.Collections.unmodifiableList(connectClusters_); + } else { + return connectClustersBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public int getConnectClustersCount() { + if (connectClustersBuilder_ == null) { + return connectClusters_.size(); + } else { + return connectClustersBuilder_.getCount(); + } + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public com.google.cloud.managedkafka.v1.ConnectCluster getConnectClusters(int index) { + if (connectClustersBuilder_ == null) { + return connectClusters_.get(index); + } else { + return connectClustersBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public Builder setConnectClusters( + int index, com.google.cloud.managedkafka.v1.ConnectCluster value) { + if (connectClustersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConnectClustersIsMutable(); + connectClusters_.set(index, value); + onChanged(); + } else { + connectClustersBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public Builder setConnectClusters( + int index, com.google.cloud.managedkafka.v1.ConnectCluster.Builder builderForValue) { + if (connectClustersBuilder_ == null) { + ensureConnectClustersIsMutable(); + connectClusters_.set(index, builderForValue.build()); + onChanged(); + } else { + connectClustersBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public Builder addConnectClusters(com.google.cloud.managedkafka.v1.ConnectCluster value) { + if (connectClustersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConnectClustersIsMutable(); + connectClusters_.add(value); + onChanged(); + } else { + connectClustersBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public Builder addConnectClusters( + int index, com.google.cloud.managedkafka.v1.ConnectCluster value) { + if (connectClustersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConnectClustersIsMutable(); + connectClusters_.add(index, value); + onChanged(); + } else { + connectClustersBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public Builder addConnectClusters( + com.google.cloud.managedkafka.v1.ConnectCluster.Builder builderForValue) { + if (connectClustersBuilder_ == null) { + ensureConnectClustersIsMutable(); + connectClusters_.add(builderForValue.build()); + onChanged(); + } else { + connectClustersBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public Builder addConnectClusters( + int index, com.google.cloud.managedkafka.v1.ConnectCluster.Builder builderForValue) { + if (connectClustersBuilder_ == null) { + ensureConnectClustersIsMutable(); + connectClusters_.add(index, builderForValue.build()); + onChanged(); + } else { + connectClustersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public Builder addAllConnectClusters( + java.lang.Iterable values) { + if (connectClustersBuilder_ == null) { + ensureConnectClustersIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, connectClusters_); + onChanged(); + } else { + connectClustersBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public Builder clearConnectClusters() { + if (connectClustersBuilder_ == null) { + connectClusters_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + connectClustersBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public Builder removeConnectClusters(int index) { + if (connectClustersBuilder_ == null) { + ensureConnectClustersIsMutable(); + connectClusters_.remove(index); + onChanged(); + } else { + connectClustersBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public com.google.cloud.managedkafka.v1.ConnectCluster.Builder getConnectClustersBuilder( + int index) { + return getConnectClustersFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder getConnectClustersOrBuilder( + int index) { + if (connectClustersBuilder_ == null) { + return connectClusters_.get(index); + } else { + return connectClustersBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public java.util.List + getConnectClustersOrBuilderList() { + if (connectClustersBuilder_ != null) { + return connectClustersBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(connectClusters_); + } + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public com.google.cloud.managedkafka.v1.ConnectCluster.Builder addConnectClustersBuilder() { + return getConnectClustersFieldBuilder() + .addBuilder(com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance()); + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public com.google.cloud.managedkafka.v1.ConnectCluster.Builder addConnectClustersBuilder( + int index) { + return getConnectClustersFieldBuilder() + .addBuilder(index, com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance()); + } + /** + * + * + *
+     * The list of Connect clusters in the requested parent.
+     * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + public java.util.List + getConnectClustersBuilderList() { + return getConnectClustersFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectCluster, + com.google.cloud.managedkafka.v1.ConnectCluster.Builder, + com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder> + getConnectClustersFieldBuilder() { + if (connectClustersBuilder_ == null) { + connectClustersBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectCluster, + com.google.cloud.managedkafka.v1.ConnectCluster.Builder, + com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder>( + connectClusters_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + connectClusters_ = null; + } + return connectClustersBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + /** + * + * + *
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * 
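+     *
+     * A sketch of how a caller typically threads this token into the next
+     * request; `request` is a placeholder, and `setPageToken` on the request
+     * builder is assumed to follow the same pattern as the other List request
+     * messages in this change:
+     *
+     *   String token = response.getNextPageToken();
+     *   if (!token.isEmpty()) {
+     *     // reuse all other request fields unchanged
+     *     request = request.toBuilder().setPageToken(token).build();
+     *   }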
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * 
+ * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * 
+ * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureUnreachableIsMutable() { + if (!unreachable_.isModifiable()) { + unreachable_ = new com.google.protobuf.LazyStringArrayList(unreachable_); + } + bitField0_ |= 0x00000004; + } + /** + * + * + *
+     * Locations that could not be reached.
+     * 
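+     *
+     * For example, a caller consuming a partial result might log the
+     * locations that were skipped (a sketch only):
+     *
+     *   for (String location : response.getUnreachableList()) {
+     *     System.err.println("Location not reached: " + location);
+     *   }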
+ * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + unreachable_.makeImmutable(); + return unreachable_; + } + /** + * + * + *
+     * Locations that could not be reached.
+     * 
+ * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + /** + * + * + *
+     * Locations that could not be reached.
+     * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + /** + * + * + *
+     * Locations that could not be reached.
+     * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + /** + * + * + *
+     * Locations that could not be reached.
+     * 
+ * + * repeated string unreachable = 3; + * + * @param index The index to set the value at. + * @param value The unreachable to set. + * @return This builder for chaining. + */ + public Builder setUnreachable(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Locations that could not be reached.
+     * 
+ * + * repeated string unreachable = 3; + * + * @param value The unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Locations that could not be reached.
+     * 
+ * + * repeated string unreachable = 3; + * + * @param values The unreachable to add. + * @return This builder for chaining. + */ + public Builder addAllUnreachable(java.lang.Iterable values) { + ensureUnreachableIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, unreachable_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Locations that could not be reached.
+     * 
+ * + * repeated string unreachable = 3; + * + * @return This builder for chaining. + */ + public Builder clearUnreachable() { + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + /** + * + * + *
+     * Locations that could not be reached.
+     * 
+ * + * repeated string unreachable = 3; + * + * @param value The bytes of the unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ListConnectClustersResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ListConnectClustersResponse) + private static final com.google.cloud.managedkafka.v1.ListConnectClustersResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ListConnectClustersResponse(); + } + + public static com.google.cloud.managedkafka.v1.ListConnectClustersResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListConnectClustersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectClustersResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersResponseOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersResponseOrBuilder.java new file mode 100644 index 000000000000..fe0868ddf019 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectClustersResponseOrBuilder.java @@ -0,0 +1,156 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface ListConnectClustersResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ListConnectClustersResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + java.util.List getConnectClustersList(); + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + com.google.cloud.managedkafka.v1.ConnectCluster getConnectClusters(int index); + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + int getConnectClustersCount(); + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + java.util.List + getConnectClustersOrBuilderList(); + /** + * + * + *
+   * The list of Connect clusters in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.ConnectCluster connect_clusters = 1; + */ + com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder getConnectClustersOrBuilder(int index); + + /** + * + * + *
+   * A token that can be sent as `page_token` to retrieve the next page of
+   * results. If this field is omitted, there are no more results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + /** + * + * + *
+   * A token that can be sent as `page_token` to retrieve the next page of
+   * results. If this field is omitted, there are no more results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
+   * Locations that could not be reached.
+   * 
+ * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + java.util.List getUnreachableList(); + /** + * + * + *
+   * Locations that could not be reached.
+   * 
+ * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + int getUnreachableCount(); + /** + * + * + *
+   * Locations that could not be reached.
+   * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + java.lang.String getUnreachable(int index); + /** + * + * + *
+   * Locations that could not be reached.
+   * 
+ * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + com.google.protobuf.ByteString getUnreachableBytes(int index); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsRequest.java new file mode 100644 index 000000000000..400b1ce4c03a --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsRequest.java @@ -0,0 +1,965 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * Request for ListConnectors.
+ * 
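+ *
+ * A minimal construction sketch using the builder generated below; the parent
+ * value is a placeholder:
+ *
+ *   ListConnectorsRequest request =
+ *       ListConnectorsRequest.newBuilder()
+ *           .setParent(
+ *               "projects/my-project/locations/us-central1/connectClusters/my-cluster")
+ *           .setPageSize(25)
+ *           .build();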
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ListConnectorsRequest} + */ +public final class ListConnectorsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ListConnectorsRequest) + ListConnectorsRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListConnectorsRequest.newBuilder() to construct. + private ListConnectorsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListConnectorsRequest() { + parent_ = ""; + pageToken_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListConnectorsRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ListConnectorsRequest.class, + com.google.cloud.managedkafka.v1.ListConnectorsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + /** + * + * + *
+   * Required. The parent Connect cluster whose connectors are to be listed.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
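+   *
+   * Assuming the generated ConnectClusterName resource helper from this same
+   * package, the value can be composed rather than hand-formatted (all
+   * identifiers here are placeholders):
+   *
+   *   String parent =
+   *       ConnectClusterName.of("my-project", "us-central1", "my-cluster").toString();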
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The parent Connect cluster whose connectors are to be listed.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + /** + * + * + *
+   * Optional. The maximum number of connectors to return. The service may
+   * return fewer than this value. If unspecified, the server will pick an
+   * appropriate default.
+   * 
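+   *
+   * Note that a page may be smaller than this cap even when more results
+   * remain, so callers should rely on `next_page_token` rather than the page
+   * size to detect the end of the list. For example (parent is a placeholder):
+   *
+   *   ListConnectorsRequest request =
+   *       ListConnectorsRequest.newBuilder().setParent(parent).setPageSize(10).build();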
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + /** + * + * + *
+   * Optional. A page token, received from a previous `ListConnectors` call.
+   * Provide this to retrieve the subsequent page.
+   *
+   * When paginating, all other parameters provided to `ListConnectors`
+   * must match the call that provided the page token.
+   * 
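+   *
+   * A sketch of a pagination loop that honors this contract by changing only
+   * `page_token` between calls; `listConnectors(request)` stands in for
+   * however the RPC is actually invoked:
+   *
+   *   ListConnectorsResponse page;
+   *   do {
+   *     page = listConnectors(request);  // placeholder for the RPC call
+   *     // ... consume page.getConnectorsList() ...
+   *     request = request.toBuilder().setPageToken(page.getNextPageToken()).build();
+   *   } while (!page.getNextPageToken().isEmpty());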
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. A page token, received from a previous `ListConnectors` call.
+   * Provide this to retrieve the subsequent page.
+   *
+   * When paginating, all other parameters provided to `ListConnectors`
+   * must match the call that provided the page token.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.ListConnectorsRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.ListConnectorsRequest other = + (com.google.cloud.managedkafka.v1.ListConnectorsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.ListConnectorsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request for ListConnectors.
+   * 
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ListConnectorsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ListConnectorsRequest) + com.google.cloud.managedkafka.v1.ListConnectorsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ListConnectorsRequest.class, + com.google.cloud.managedkafka.v1.ListConnectorsRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.ListConnectorsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectorsRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.ListConnectorsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectorsRequest build() { + com.google.cloud.managedkafka.v1.ListConnectorsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectorsRequest buildPartial() { + com.google.cloud.managedkafka.v1.ListConnectorsRequest result = + new com.google.cloud.managedkafka.v1.ListConnectorsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.ListConnectorsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor 
field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.ListConnectorsRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.ListConnectorsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.ListConnectorsRequest other) { + if (other == com.google.cloud.managedkafka.v1.ListConnectorsRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + /** + * + * + *
+     * Required. The parent Connect cluster whose connectors are to be listed.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The parent Connect cluster whose connectors are to be listed.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The parent Connect cluster whose connectors are to be listed.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent Connect cluster whose connectors are to be listed.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The parent Connect cluster whose connectors are to be listed.
+     * Structured like
+     * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + /** + * + * + *
+     * Optional. The maximum number of connectors to return. The service may
+     * return fewer than this value. If unspecified, the server will pick an
+     * appropriate default.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + /** + * + * + *
+     * Optional. The maximum number of connectors to return. The service may
+     * return fewer than this value. If unspecified, the server will pick an
+     * appropriate default.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The maximum number of connectors to return. The service may
+     * return fewer than this value. If unspecified, the server will pick an
+     * appropriate default.
+     * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectors` call.
+     * Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectors`
+     * must match the call that provided the page token.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectors` call.
+     * Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectors`
+     * must match the call that provided the page token.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectors` call.
+     * Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectors`
+     * must match the call that provided the page token.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectors` call.
+     * Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectors`
+     * must match the call that provided the page token.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. A page token, received from a previous `ListConnectors` call.
+     * Provide this to retrieve the subsequent page.
+     *
+     * When paginating, all other parameters provided to `ListConnectors`
+     * must match the call that provided the page token.
+     * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ListConnectorsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ListConnectorsRequest) + private static final com.google.cloud.managedkafka.v1.ListConnectorsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ListConnectorsRequest(); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListConnectorsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectorsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsRequestOrBuilder.java new file mode 100644 index 000000000000..26f76c21c5e0 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsRequestOrBuilder.java @@ -0,0 +1,107 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface ListConnectorsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ListConnectorsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The parent Connect cluster whose connectors are to be listed.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+   * Required. The parent Connect cluster whose connectors are to be listed.
+   * Structured like
+   * `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Optional. The maximum number of connectors to return. The service may
+   * return fewer than this value. If unspecified, the server will pick an
+   * appropriate default.
+   * 
+ * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
+   * Optional. A page token, received from a previous `ListConnectors` call.
+   * Provide this to retrieve the subsequent page.
+   *
+   * When paginating, all other parameters provided to `ListConnectors`
+   * must match the call that provided the page token.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + /** + * + * + *
+   * Optional. A page token, received from a previous `ListConnectors` call.
+   * Provide this to retrieve the subsequent page.
+   *
+   * When paginating, all other parameters provided to `ListConnectors`
+   * must match the call that provided the page token.
+   * 
+ * + * string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsResponse.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsResponse.java new file mode 100644 index 000000000000..75c9cc7fe2cd --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsResponse.java @@ -0,0 +1,1128 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * Response for ListConnectors.
+ * 
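+ *
+ * A minimal consumption sketch; `getName()` assumes the Connector resource
+ * exposes its name like the other resources in this API:
+ *
+ *   for (Connector connector : response.getConnectorsList()) {
+ *     System.out.println(connector.getName());
+ *   }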
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ListConnectorsResponse} + */ +public final class ListConnectorsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ListConnectorsResponse) + ListConnectorsResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListConnectorsResponse.newBuilder() to construct. + private ListConnectorsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListConnectorsResponse() { + connectors_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListConnectorsResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ListConnectorsResponse.class, + com.google.cloud.managedkafka.v1.ListConnectorsResponse.Builder.class); + } + + public static final int CONNECTORS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List connectors_; + /** + * + * + *
+   * The list of connectors in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + @java.lang.Override + public java.util.List getConnectorsList() { + return connectors_; + } + /** + * + * + *
+   * The list of connectors in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + @java.lang.Override + public java.util.List + getConnectorsOrBuilderList() { + return connectors_; + } + /** + * + * + *
+   * The list of connectors in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + @java.lang.Override + public int getConnectorsCount() { + return connectors_.size(); + } + /** + * + * + *
+   * The list of connectors in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.Connector getConnectors(int index) { + return connectors_.get(index); + } + /** + * + * + *
+   * The list of connectors in the requested parent.
+   * 
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectorOrBuilder getConnectorsOrBuilder(int index) { + return connectors_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + /** + * + * + *
+   * A token that can be sent as `page_token` to retrieve the next page of
+   * results. If this field is omitted, there are no more results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + /** + * + * + *
+   * A token that can be sent as `page_token` to retrieve the next page of
+   * results. If this field is omitted, there are no more results.
+   * 
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < connectors_.size(); i++) { + output.writeMessage(1, connectors_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < connectors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, connectors_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.ListConnectorsResponse)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.ListConnectorsResponse other = + (com.google.cloud.managedkafka.v1.ListConnectorsResponse) obj; + + if (!getConnectorsList().equals(other.getConnectorsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getConnectorsCount() > 0) { + hash = (37 * hash) + CONNECTORS_FIELD_NUMBER; + hash = (53 * hash) + getConnectorsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.ListConnectorsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * <pre>
+   * Response for ListConnectors.
+   * </pre>
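+   *
+   * <p>As a hand-written sketch (not produced by protoc), the builder can
+   * assemble a response directly, e.g. in tests or fakes; normally the
+   * service populates this message:
+   * <pre>{@code
+   * ListConnectorsResponse response =
+   *     ListConnectorsResponse.newBuilder()
+   *         .addConnectors(Connector.getDefaultInstance()) // placeholder element
+   *         .setNextPageToken("") // empty token: no further pages
+   *         .build();
+   * }</pre>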
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.ListConnectorsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ListConnectorsResponse) + com.google.cloud.managedkafka.v1.ListConnectorsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ListConnectorsResponse.class, + com.google.cloud.managedkafka.v1.ListConnectorsResponse.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.ListConnectorsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (connectorsBuilder_ == null) { + connectors_ = java.util.Collections.emptyList(); + } else { + connectors_ = null; + connectorsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectorsResponse getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.ListConnectorsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectorsResponse build() { + com.google.cloud.managedkafka.v1.ListConnectorsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectorsResponse buildPartial() { + com.google.cloud.managedkafka.v1.ListConnectorsResponse result = + new com.google.cloud.managedkafka.v1.ListConnectorsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.cloud.managedkafka.v1.ListConnectorsResponse result) { + if (connectorsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + connectors_ = java.util.Collections.unmodifiableList(connectors_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.connectors_ = connectors_; + } else { + result.connectors_ = connectorsBuilder_.build(); + } + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.ListConnectorsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor 
field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.ListConnectorsResponse) { + return mergeFrom((com.google.cloud.managedkafka.v1.ListConnectorsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.ListConnectorsResponse other) { + if (other == com.google.cloud.managedkafka.v1.ListConnectorsResponse.getDefaultInstance()) + return this; + if (connectorsBuilder_ == null) { + if (!other.connectors_.isEmpty()) { + if (connectors_.isEmpty()) { + connectors_ = other.connectors_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureConnectorsIsMutable(); + connectors_.addAll(other.connectors_); + } + onChanged(); + } + } else { + if (!other.connectors_.isEmpty()) { + if (connectorsBuilder_.isEmpty()) { + connectorsBuilder_.dispose(); + connectorsBuilder_ = null; + connectors_ = other.connectors_; + bitField0_ = (bitField0_ & ~0x00000001); + connectorsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getConnectorsFieldBuilder() + : null; + } else { + connectorsBuilder_.addAllMessages(other.connectors_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.managedkafka.v1.Connector m = + input.readMessage( + com.google.cloud.managedkafka.v1.Connector.parser(), extensionRegistry); + if (connectorsBuilder_ == null) { + ensureConnectorsIsMutable(); + connectors_.add(m); + } else { + connectorsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List connectors_ = + java.util.Collections.emptyList(); + + private void ensureConnectorsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + connectors_ = + new java.util.ArrayList(connectors_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.managedkafka.v1.Connector, + com.google.cloud.managedkafka.v1.Connector.Builder, + com.google.cloud.managedkafka.v1.ConnectorOrBuilder> + connectorsBuilder_; + + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
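+     *
+     * <p>A small hand-written sketch of working with this repeated field via
+     * the builder ({@code connectorA} and {@code moreConnectors} are
+     * placeholders, not defined in this file):
+     * <pre>{@code
+     * builder
+     *     .addConnectors(connectorA)         // append a single element
+     *     .addAllConnectors(moreConnectors); // append a whole collection
+     * for (Connector c : builder.getConnectorsList()) {
+     *   // each element is an immutable Connector message
+     * }
+     * }</pre>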
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public java.util.List getConnectorsList() { + if (connectorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(connectors_); + } else { + return connectorsBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public int getConnectorsCount() { + if (connectorsBuilder_ == null) { + return connectors_.size(); + } else { + return connectorsBuilder_.getCount(); + } + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public com.google.cloud.managedkafka.v1.Connector getConnectors(int index) { + if (connectorsBuilder_ == null) { + return connectors_.get(index); + } else { + return connectorsBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public Builder setConnectors(int index, com.google.cloud.managedkafka.v1.Connector value) { + if (connectorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConnectorsIsMutable(); + connectors_.set(index, value); + onChanged(); + } else { + connectorsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public Builder setConnectors( + int index, com.google.cloud.managedkafka.v1.Connector.Builder builderForValue) { + if (connectorsBuilder_ == null) { + ensureConnectorsIsMutable(); + connectors_.set(index, builderForValue.build()); + onChanged(); + } else { + connectorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public Builder addConnectors(com.google.cloud.managedkafka.v1.Connector value) { + if (connectorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConnectorsIsMutable(); + connectors_.add(value); + onChanged(); + } else { + connectorsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public Builder addConnectors(int index, com.google.cloud.managedkafka.v1.Connector value) { + if (connectorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConnectorsIsMutable(); + connectors_.add(index, value); + onChanged(); + } else { + connectorsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public Builder addConnectors( + com.google.cloud.managedkafka.v1.Connector.Builder builderForValue) { + if (connectorsBuilder_ == null) { + ensureConnectorsIsMutable(); + connectors_.add(builderForValue.build()); + onChanged(); + } else { + connectorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public Builder addConnectors( + int index, com.google.cloud.managedkafka.v1.Connector.Builder builderForValue) { + if (connectorsBuilder_ == null) { + ensureConnectorsIsMutable(); + connectors_.add(index, builderForValue.build()); + onChanged(); + } else { + connectorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public Builder addAllConnectors( + java.lang.Iterable values) { + if (connectorsBuilder_ == null) { + ensureConnectorsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, connectors_); + onChanged(); + } else { + connectorsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public Builder clearConnectors() { + if (connectorsBuilder_ == null) { + connectors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + connectorsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public Builder removeConnectors(int index) { + if (connectorsBuilder_ == null) { + ensureConnectorsIsMutable(); + connectors_.remove(index); + onChanged(); + } else { + connectorsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public com.google.cloud.managedkafka.v1.Connector.Builder getConnectorsBuilder(int index) { + return getConnectorsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public com.google.cloud.managedkafka.v1.ConnectorOrBuilder getConnectorsOrBuilder(int index) { + if (connectorsBuilder_ == null) { + return connectors_.get(index); + } else { + return connectorsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public java.util.List + getConnectorsOrBuilderList() { + if (connectorsBuilder_ != null) { + return connectorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(connectors_); + } + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public com.google.cloud.managedkafka.v1.Connector.Builder addConnectorsBuilder() { + return getConnectorsFieldBuilder() + .addBuilder(com.google.cloud.managedkafka.v1.Connector.getDefaultInstance()); + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public com.google.cloud.managedkafka.v1.Connector.Builder addConnectorsBuilder(int index) { + return getConnectorsFieldBuilder() + .addBuilder(index, com.google.cloud.managedkafka.v1.Connector.getDefaultInstance()); + } + /** + * + * + *
+     * <pre>
+     * The list of connectors in the requested parent.
+     * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + public java.util.List + getConnectorsBuilderList() { + return getConnectorsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.managedkafka.v1.Connector, + com.google.cloud.managedkafka.v1.Connector.Builder, + com.google.cloud.managedkafka.v1.ConnectorOrBuilder> + getConnectorsFieldBuilder() { + if (connectorsBuilder_ == null) { + connectorsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.managedkafka.v1.Connector, + com.google.cloud.managedkafka.v1.Connector.Builder, + com.google.cloud.managedkafka.v1.ConnectorOrBuilder>( + connectors_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + connectors_ = null; + } + return connectorsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + /** + * + * + *
+     * <pre>
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * </pre>
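+     *
+     * <p>A hedged, hand-written sketch of the paging loop this token enables;
+     * {@code fetchPage} is a hypothetical stand-in for whatever stub actually
+     * issues the ListConnectors RPC:
+     * <pre>{@code
+     * ListConnectorsRequest request =
+     *     ListConnectorsRequest.newBuilder().setParent(parent).build();
+     * while (true) {
+     *   ListConnectorsResponse response = fetchPage(request); // hypothetical call
+     *   for (Connector connector : response.getConnectorsList()) {
+     *     // handle each connector
+     *   }
+     *   String token = response.getNextPageToken();
+     *   if (token.isEmpty()) {
+     *     break; // an omitted (empty) token means no more results
+     *   }
+     *   request = request.toBuilder().setPageToken(token).build();
+     * }
+     * }</pre>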
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * <pre>
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * </pre>
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * <pre>
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * </pre>
+ * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
+     * <pre>
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * </pre>
+ * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * <pre>
+     * A token that can be sent as `page_token` to retrieve the next page of
+     * results. If this field is omitted, there are no more results.
+     * </pre>
+ * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ListConnectorsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ListConnectorsResponse) + private static final com.google.cloud.managedkafka.v1.ListConnectorsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ListConnectorsResponse(); + } + + public static com.google.cloud.managedkafka.v1.ListConnectorsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListConnectorsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ListConnectorsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsResponseOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsResponseOrBuilder.java new file mode 100644 index 000000000000..c6576a8010f7 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ListConnectorsResponseOrBuilder.java @@ -0,0 +1,105 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface ListConnectorsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ListConnectorsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * <pre>
+   * The list of connectors in the requested parent.
+   * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + java.util.List getConnectorsList(); + /** + * + * + *
+   * <pre>
+   * The list of connectors in the requested parent.
+   * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + com.google.cloud.managedkafka.v1.Connector getConnectors(int index); + /** + * + * + *
+   * <pre>
+   * The list of connectors in the requested parent.
+   * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + int getConnectorsCount(); + /** + * + * + *
+   * <pre>
+   * The list of connectors in the requested parent.
+   * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + java.util.List + getConnectorsOrBuilderList(); + /** + * + * + *
+   * <pre>
+   * The list of connectors in the requested parent.
+   * </pre>
+ * + * repeated .google.cloud.managedkafka.v1.Connector connectors = 1; + */ + com.google.cloud.managedkafka.v1.ConnectorOrBuilder getConnectorsOrBuilder(int index); + + /** + * + * + *
+   * <pre>
+   * A token that can be sent as `page_token` to retrieve the next page of
+   * results. If this field is omitted, there are no more results.
+   * </pre>
+ * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + /** + * + * + *
+   * <pre>
+   * A token that can be sent as `page_token` to retrieve the next page of
+   * results. If this field is omitted, there are no more results.
+   * </pre>
+ * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectProto.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectProto.java new file mode 100644 index 000000000000..9fed6c6d9404 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ManagedKafkaConnectProto.java @@ -0,0 +1,462 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public final class ManagedKafkaConnectProto { + private ManagedKafkaConnectProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_descriptor; + static final 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n8google/cloud/managedkafka/v1/managed_k" + + "afka_connect.proto\022\034google.cloud.managed" + + "kafka.v1\032\034google/api/annotations.proto\032\027" + + "google/api/client.proto\032\037google/api/fiel" + + "d_behavior.proto\032\033google/api/field_info." + + "proto\032\031google/api/resource.proto\032,google" + + "/cloud/managedkafka/v1/resources.proto\032#" + + "google/longrunning/operations.proto\032\033goo" + + "gle/protobuf/empty.proto\032 google/protobu" + + "f/field_mask.proto\"\\\n\030GetConnectClusterR" + + "equest\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*managedkaf" + + "ka.googleapis.com/ConnectCluster\"\357\001\n\033Cre" + + "ateConnectClusterRequest\022B\n\006parent\030\001 \001(\t" + + "B2\340A\002\372A,\022*managedkafka.googleapis.com/Co" + + "nnectCluster\022\037\n\022connect_cluster_id\030\002 \001(\t" + + "B\003\340A\002\022J\n\017connect_cluster\030\003 \001(\0132,.google." 
+ + "cloud.managedkafka.v1.ConnectClusterB\003\340A" + + "\002\022\037\n\nrequest_id\030\004 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"\300\001\n\033U" + + "pdateConnectClusterRequest\0224\n\013update_mas" + + "k\030\001 \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002" + + "\022J\n\017connect_cluster\030\002 \001(\0132,.google.cloud" + + ".managedkafka.v1.ConnectClusterB\003\340A\002\022\037\n\n" + + "request_id\030\003 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001\"\200\001\n\033Delete" + + "ConnectClusterRequest\022@\n\004name\030\001 \001(\tB2\340A\002" + + "\372A,\n*managedkafka.googleapis.com/Connect" + + "Cluster\022\037\n\nrequest_id\030\002 \001(\tB\013\340A\001\342\214\317\327\010\002\010\001" + + "\"\275\001\n\032ListConnectClustersRequest\022B\n\006paren" + + "t\030\001 \001(\tB2\340A\002\372A,\022*managedkafka.googleapis" + + ".com/ConnectCluster\022\026\n\tpage_size\030\002 \001(\005B\003" + + "\340A\001\022\027\n\npage_token\030\003 \001(\tB\003\340A\001\022\023\n\006filter\030\004" + + " \001(\tB\003\340A\001\022\025\n\010order_by\030\005 \001(\tB\003\340A\001\"\223\001\n\033Lis" + + "tConnectClustersResponse\022F\n\020connect_clus" + + "ters\030\001 \003(\0132,.google.cloud.managedkafka.v" + + "1.ConnectCluster\022\027\n\017next_page_token\030\002 \001(" + + "\t\022\023\n\013unreachable\030\003 \003(\t\"R\n\023GetConnectorRe" + + "quest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%managedkafk" + + "a.googleapis.com/Connector\"\263\001\n\026CreateCon" + + "nectorRequest\022=\n\006parent\030\001 \001(\tB-\340A\002\372A\'\022%m" + + "anagedkafka.googleapis.com/Connector\022\031\n\014" + + "connector_id\030\002 \001(\tB\003\340A\002\022?\n\tconnector\030\003 \001" + + "(\0132\'.google.cloud.managedkafka.v1.Connec" + + "torB\003\340A\002\"\217\001\n\026UpdateConnectorRequest\0224\n\013u" + + "pdate_mask\030\001 \001(\0132\032.google.protobuf.Field" + + "MaskB\003\340A\002\022?\n\tconnector\030\002 \001(\0132\'.google.cl" + + "oud.managedkafka.v1.ConnectorB\003\340A\002\"U\n\026De" + + "leteConnectorRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372" + + "A\'\n%managedkafka.googleapis.com/Connecto" + + "r\"\207\001\n\025ListConnectorsRequest\022=\n\006parent\030\001 " + + "\001(\tB-\340A\002\372A\'\022%managedkafka.googleapis.com" + + "/Connector\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\npa" + + "ge_token\030\003 \001(\tB\003\340A\001\"n\n\026ListConnectorsRes" + + "ponse\022;\n\nconnectors\030\001 \003(\0132\'.google.cloud" + + ".managedkafka.v1.Connector\022\027\n\017next_page_" + + "token\030\002 \001(\t\"T\n\025PauseConnectorRequest\022;\n\004" + + "name\030\001 \001(\tB-\340A\002\372A\'\n%managedkafka.googlea" + + "pis.com/Connector\"\030\n\026PauseConnectorRespo" + + "nse\"U\n\026ResumeConnectorRequest\022;\n\004name\030\001 " + + "\001(\tB-\340A\002\372A\'\n%managedkafka.googleapis.com" + + "/Connector\"\031\n\027ResumeConnectorResponse\"V\n" + + "\027RestartConnectorRequest\022;\n\004name\030\001 \001(\tB-" + + "\340A\002\372A\'\n%managedkafka.googleapis.com/Conn" + + "ector\"\032\n\030RestartConnectorResponse\"S\n\024Sto" + + "pConnectorRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n" + + "%managedkafka.googleapis.com/Connector\"\027" + + "\n\025StopConnectorResponse2\203\031\n\023ManagedKafka" + + "Connect\022\320\001\n\023ListConnectClusters\0228.google" + + ".cloud.managedkafka.v1.ListConnectCluste" + + "rsRequest\0329.google.cloud.managedkafka.v1" + + 
".ListConnectClustersResponse\"D\332A\006parent\202" + + "\323\344\223\0025\0223/v1/{parent=projects/*/locations/" + + "*}/connectClusters\022\275\001\n\021GetConnectCluster" + + "\0226.google.cloud.managedkafka.v1.GetConne" + + "ctClusterRequest\032,.google.cloud.managedk" + + "afka.v1.ConnectCluster\"B\332A\004name\202\323\344\223\0025\0223/" + + "v1/{name=projects/*/locations/*/connectC" + + "lusters/*}\022\221\002\n\024CreateConnectCluster\0229.go" + + "ogle.cloud.managedkafka.v1.CreateConnect" + + "ClusterRequest\032\035.google.longrunning.Oper" + + "ation\"\236\001\312A#\n\016ConnectCluster\022\021OperationMe" + + "tadata\332A)parent,connect_cluster,connect_" + + "cluster_id\202\323\344\223\002F\"3/v1/{parent=projects/*" + + "/locations/*}/connectClusters:\017connect_c" + + "luster\022\223\002\n\024UpdateConnectCluster\0229.google" + + ".cloud.managedkafka.v1.UpdateConnectClus" + + "terRequest\032\035.google.longrunning.Operatio" + + "n\"\240\001\312A#\n\016ConnectCluster\022\021OperationMetada" + + "ta\332A\033connect_cluster,update_mask\202\323\344\223\002V2C" + + "/v1/{connect_cluster.name=projects/*/loc" + + "ations/*/connectClusters/*}:\017connect_clu" + + "ster\022\341\001\n\024DeleteConnectCluster\0229.google.c" + + "loud.managedkafka.v1.DeleteConnectCluste" + + "rRequest\032\035.google.longrunning.Operation\"" + + "o\312A*\n\025google.protobuf.Empty\022\021OperationMe" + + "tadata\332A\004name\202\323\344\223\0025*3/v1/{name=projects/" + + "*/locations/*/connectClusters/*}\022\316\001\n\016Lis" + + "tConnectors\0223.google.cloud.managedkafka." + + "v1.ListConnectorsRequest\0324.google.cloud." + + "managedkafka.v1.ListConnectorsResponse\"Q" + + "\332A\006parent\202\323\344\223\002B\022@/v1/{parent=projects/*/" + + "locations/*/connectClusters/*}/connector" + + "s\022\273\001\n\014GetConnector\0221.google.cloud.manage" + + "dkafka.v1.GetConnectorRequest\032\'.google.c" + + "loud.managedkafka.v1.Connector\"O\332A\004name\202" + + "\323\344\223\002B\022@/v1/{name=projects/*/locations/*/" + + "connectClusters/*/connectors/*}\022\345\001\n\017Crea" + + "teConnector\0224.google.cloud.managedkafka." + + "v1.CreateConnectorRequest\032\'.google.cloud" + + ".managedkafka.v1.Connector\"s\332A\035parent,co" + + "nnector,connector_id\202\323\344\223\002M\"@/v1/{parent=" + + "projects/*/locations/*/connectClusters/*" + + "}/connectors:\tconnector\022\347\001\n\017UpdateConnec" + + "tor\0224.google.cloud.managedkafka.v1.Updat" + + "eConnectorRequest\032\'.google.cloud.managed" + + "kafka.v1.Connector\"u\332A\025connector,update_" + + "mask\202\323\344\223\002W2J/v1/{connector.name=projects" + + "/*/locations/*/connectClusters/*/connect" + + "ors/*}:\tconnector\022\260\001\n\017DeleteConnector\0224." 
+ + "google.cloud.managedkafka.v1.DeleteConne" + + "ctorRequest\032\026.google.protobuf.Empty\"O\332A\004" + + "name\202\323\344\223\002B*@/v1/{name=projects/*/locatio" + + "ns/*/connectClusters/*/connectors/*}\022\325\001\n" + + "\016PauseConnector\0223.google.cloud.managedka" + + "fka.v1.PauseConnectorRequest\0324.google.cl" + + "oud.managedkafka.v1.PauseConnectorRespon" + + "se\"X\332A\004name\202\323\344\223\002K\"F/v1/{name=projects/*/" + + "locations/*/connectClusters/*/connectors" + + "/*}:pause:\001*\022\331\001\n\017ResumeConnector\0224.googl" + + "e.cloud.managedkafka.v1.ResumeConnectorR" + + "equest\0325.google.cloud.managedkafka.v1.Re" + + "sumeConnectorResponse\"Y\332A\004name\202\323\344\223\002L\"G/v" + + "1/{name=projects/*/locations/*/connectCl" + + "usters/*/connectors/*}:resume:\001*\022\335\001\n\020Res" + + "tartConnector\0225.google.cloud.managedkafk" + + "a.v1.RestartConnectorRequest\0326.google.cl" + + "oud.managedkafka.v1.RestartConnectorResp" + + "onse\"Z\332A\004name\202\323\344\223\002M\"H/v1/{name=projects/" + + "*/locations/*/connectClusters/*/connecto" + + "rs/*}:restart:\001*\022\321\001\n\rStopConnector\0222.goo" + + "gle.cloud.managedkafka.v1.StopConnectorR" + + "equest\0323.google.cloud.managedkafka.v1.St" + + "opConnectorResponse\"W\332A\004name\202\323\344\223\002J\"E/v1/" + + "{name=projects/*/locations/*/connectClus" + + "ters/*/connectors/*}:stop:\001*\032O\312A\033managed" + + "kafka.googleapis.com\322A.https://www.googl" + + "eapis.com/auth/cloud-platformB\344\001\n com.go" + + "ogle.cloud.managedkafka.v1B\030ManagedKafka" + + "ConnectProtoP\001ZDcloud.google.com/go/mana" + + "gedkafka/apiv1/managedkafkapb;managedkaf" + + "kapb\252\002\034Google.Cloud.ManagedKafka.V1\312\002\034Go" + + "ogle\\Cloud\\ManagedKafka\\V1\352\002\037Google::Clo" + + "ud::ManagedKafka::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.FieldInfoProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.managedkafka.v1.ResourcesProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + }); + internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_GetConnectClusterRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_CreateConnectClusterRequest_descriptor, + new java.lang.String[] { + "Parent", "ConnectClusterId", "ConnectCluster", "RequestId", + }); + internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(2); + 
internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_descriptor, + new java.lang.String[] { + "UpdateMask", "ConnectCluster", "RequestId", + }); + internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_DeleteConnectClusterRequest_descriptor, + new java.lang.String[] { + "Name", "RequestId", + }); + internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ListConnectClustersRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", "Filter", "OrderBy", + }); + internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ListConnectClustersResponse_descriptor, + new java.lang.String[] { + "ConnectClusters", "NextPageToken", "Unreachable", + }); + internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_GetConnectorRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_CreateConnectorRequest_descriptor, + new java.lang.String[] { + "Parent", "ConnectorId", "Connector", + }); + internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_descriptor, + new java.lang.String[] { + "UpdateMask", "Connector", + }); + internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_DeleteConnectorRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_descriptor = + getDescriptor().getMessageTypes().get(10); + 
internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ListConnectorsRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ListConnectorsResponse_descriptor, + new java.lang.String[] { + "Connectors", "NextPageToken", + }); + internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_descriptor, + new java.lang.String[] {}); + internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_descriptor, + new java.lang.String[] {}); + internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_descriptor, + new java.lang.String[] {}); + internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_descriptor, + new java.lang.String[] { + 
"Name", + }); + internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_descriptor, + new java.lang.String[] {}); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.FieldInfoProto.fieldInfo); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resourceReference); + registry.add(com.google.longrunning.OperationsProto.operationInfo); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.FieldInfoProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.managedkafka.v1.ResourcesProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorRequest.java new file mode 100644 index 000000000000..09c28ae49736 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorRequest.java @@ -0,0 +1,654 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + *
+ * <pre>
+ * Request for PauseConnector.
+ * </pre>
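+ *
+ * <p>A hedged usage sketch: with the GAPIC client added in this release,
+ * pausing a connector is expected to look roughly like the following
+ * ({@code ManagedKafkaConnectClient.pauseConnector} is assumed here, not
+ * shown in this file):
+ * <pre>{@code
+ * try (ManagedKafkaConnectClient client = ManagedKafkaConnectClient.create()) {
+ *   PauseConnectorRequest request =
+ *       PauseConnectorRequest.newBuilder().setName(connectorName).build();
+ *   PauseConnectorResponse response = client.pauseConnector(request); // assumed method
+ * }
+ * }</pre>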
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.PauseConnectorRequest} + */ +public final class PauseConnectorRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.PauseConnectorRequest) + PauseConnectorRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use PauseConnectorRequest.newBuilder() to construct. + private PauseConnectorRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private PauseConnectorRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new PauseConnectorRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.PauseConnectorRequest.class, + com.google.cloud.managedkafka.v1.PauseConnectorRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
+   * <pre>
+   * Required. The name of the connector to pause.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
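+   *
+   * <p>A hand-written sketch of setting this field with the documented
+   * structure ({@code my-project} and the other segments are placeholder
+   * values):
+   * <pre>{@code
+   * PauseConnectorRequest request =
+   *     PauseConnectorRequest.newBuilder()
+   *         .setName(
+   *             "projects/my-project/locations/us-central1"
+   *                 + "/connectClusters/my-cluster/connectors/my-connector")
+   *         .build();
+   * }</pre>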
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * <pre>
+   * Required. The name of the connector to pause.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.PauseConnectorRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.PauseConnectorRequest other = + (com.google.cloud.managedkafka.v1.PauseConnectorRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.PauseConnectorRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request for PauseConnector.
+   * </pre>
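+   *
+   * A minimal usage sketch, not generated output: it assumes the
+   * ManagedKafkaConnectClient and ConnectorName helpers added elsewhere in
+   * this release, and all IDs below are hypothetical.
+   * <pre>{@code
+   * try (ManagedKafkaConnectClient client = ManagedKafkaConnectClient.create()) {
+   *   PauseConnectorRequest request =
+   *       PauseConnectorRequest.newBuilder()
+   *           .setName(
+   *               ConnectorName.of(
+   *                       "my-project", "us-central1", "my-connect-cluster", "my-connector")
+   *                   .toString())
+   *           .build();
+   *   // PauseConnectorResponse declares no fields in this proto; a normal
+   *   // return simply means the pause request was accepted.
+   *   PauseConnectorResponse response = client.pauseConnector(request);
+   * }
+   * }</pre>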
+ * + * Protobuf type {@code google.cloud.managedkafka.v1.PauseConnectorRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.PauseConnectorRequest) + com.google.cloud.managedkafka.v1.PauseConnectorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.PauseConnectorRequest.class, + com.google.cloud.managedkafka.v1.PauseConnectorRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.PauseConnectorRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.PauseConnectorRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.PauseConnectorRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.PauseConnectorRequest build() { + com.google.cloud.managedkafka.v1.PauseConnectorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.PauseConnectorRequest buildPartial() { + com.google.cloud.managedkafka.v1.PauseConnectorRequest result = + new com.google.cloud.managedkafka.v1.PauseConnectorRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.PauseConnectorRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor 
field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.PauseConnectorRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.PauseConnectorRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.PauseConnectorRequest other) { + if (other == com.google.cloud.managedkafka.v1.PauseConnectorRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. The name of the connector to pause.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
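+     *
+     * For example (hypothetical IDs), a well-formed value would be:
+     * <pre>{@code
+     * String name =
+     *     "projects/my-project/locations/us-central1"
+     *         + "/connectClusters/my-connect-cluster/connectors/my-connector";
+     * }</pre>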
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The name of the connector to pause.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The name of the connector to pause.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the connector to pause.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the connector to pause.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.PauseConnectorRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.PauseConnectorRequest) + private static final com.google.cloud.managedkafka.v1.PauseConnectorRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.PauseConnectorRequest(); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PauseConnectorRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.PauseConnectorRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorRequestOrBuilder.java new file mode 100644 index 000000000000..0e787595745e --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorRequestOrBuilder.java @@ -0,0 +1,59 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface PauseConnectorRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.PauseConnectorRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + * + * + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + * + * + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorResponse.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorResponse.java new file mode 100644 index 000000000000..93660231fb5f --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorResponse.java @@ -0,0 +1,433 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.PauseConnectorResponse} + */ +public final class PauseConnectorResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.PauseConnectorResponse) + PauseConnectorResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use PauseConnectorResponse.newBuilder() to construct. 
+ private PauseConnectorResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private PauseConnectorResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new PauseConnectorResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.PauseConnectorResponse.class, + com.google.cloud.managedkafka.v1.PauseConnectorResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.PauseConnectorResponse)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.PauseConnectorResponse other = + (com.google.cloud.managedkafka.v1.PauseConnectorResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.PauseConnectorResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.PauseConnectorResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
<Builder>
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.PauseConnectorResponse) + com.google.cloud.managedkafka.v1.PauseConnectorResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.PauseConnectorResponse.class, + com.google.cloud.managedkafka.v1.PauseConnectorResponse.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.PauseConnectorResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_PauseConnectorResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.PauseConnectorResponse getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.PauseConnectorResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.PauseConnectorResponse build() { + com.google.cloud.managedkafka.v1.PauseConnectorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.PauseConnectorResponse buildPartial() { + com.google.cloud.managedkafka.v1.PauseConnectorResponse result = + new com.google.cloud.managedkafka.v1.PauseConnectorResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.PauseConnectorResponse) { + return mergeFrom((com.google.cloud.managedkafka.v1.PauseConnectorResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(com.google.cloud.managedkafka.v1.PauseConnectorResponse other) { + if (other == com.google.cloud.managedkafka.v1.PauseConnectorResponse.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.PauseConnectorResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.PauseConnectorResponse) + private static final com.google.cloud.managedkafka.v1.PauseConnectorResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.PauseConnectorResponse(); + } + + public static com.google.cloud.managedkafka.v1.PauseConnectorResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PauseConnectorResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.PauseConnectorResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorResponseOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorResponseOrBuilder.java new file mode 100644 
index 000000000000..f811feef8153 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/PauseConnectorResponseOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface PauseConnectorResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.PauseConnectorResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResourcesProto.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResourcesProto.java index b7373aa216f8..0a32daf0b2c0 100644 --- a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResourcesProto.java +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResourcesProto.java @@ -88,6 +88,42 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_managedkafka_v1_OperationMetadata_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_managedkafka_v1_OperationMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ConnectCluster_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ConnectCluster_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ConnectCluster_LabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ConnectCluster_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ConnectCluster_ConfigEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ConnectCluster_ConfigEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_Connector_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_Connector_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_Connector_ConfigsEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_Connector_ConfigsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -100,86 +136,140 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "\n,google/cloud/managedkafka/v1/resources" + ".proto\022\034google.cloud.managedkafka.v1\032\037go" + "ogle/api/field_behavior.proto\032\031google/ap" - + "i/resource.proto\032\037google/protobuf/timest" - + "amp.proto\"\330\006\n\007Cluster\022B\n\ngcp_config\030\t \001(" - + "\0132\'.google.cloud.managedkafka.v1.GcpConf" - + "igB\003\340A\002H\000\022\021\n\004name\030\001 \001(\tB\003\340A\010\0224\n\013create_t" - + "ime\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340" - + "A\003\0224\n\013update_time\030\003 \001(\0132\032.google.protobu" - + "f.TimestampB\003\340A\003\022F\n\006labels\030\004 \003(\01321.googl" - + "e.cloud.managedkafka.v1.Cluster.LabelsEn" - + "tryB\003\340A\001\022J\n\017capacity_config\030\005 \001(\0132,.goog" - + "le.cloud.managedkafka.v1.CapacityConfigB" - + "\003\340A\002\022L\n\020rebalance_config\030\010 \001(\0132-.google." 
- + "cloud.managedkafka.v1.RebalanceConfigB\003\340" - + "A\001\022?\n\005state\030\n \001(\0162+.google.cloud.managed" - + "kafka.v1.Cluster.StateB\003\340A\003\022\037\n\rsatisfies" - + "_pzi\030\013 \001(\010B\003\340A\003H\001\210\001\001\022\037\n\rsatisfies_pzs\030\014 " - + "\001(\010B\003\340A\003H\002\210\001\001\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(" - + "\t\022\r\n\005value\030\002 \001(\t:\0028\001\"F\n\005State\022\025\n\021STATE_U" - + "NSPECIFIED\020\000\022\014\n\010CREATING\020\001\022\n\n\006ACTIVE\020\002\022\014" - + "\n\010DELETING\020\003:w\352At\n#managedkafka.googleap" - + "is.com/Cluster\022:projects/{project}/locat" - + "ions/{location}/clusters/{cluster}*\010clus" - + "ters2\007clusterB\021\n\017platform_configB\020\n\016_sat" - + "isfies_pziB\020\n\016_satisfies_pzs\"D\n\016Capacity" - + "Config\022\027\n\nvcpu_count\030\001 \001(\003B\003\340A\002\022\031\n\014memor" - + "y_bytes\030\002 \001(\003B\003\340A\002\"\250\001\n\017RebalanceConfig\022E" - + "\n\004mode\030\001 \001(\01622.google.cloud.managedkafka" - + ".v1.RebalanceConfig.ModeB\003\340A\001\"N\n\004Mode\022\024\n" - + "\020MODE_UNSPECIFIED\020\000\022\020\n\014NO_REBALANCE\020\001\022\036\n" - + "\032AUTO_REBALANCE_ON_SCALE_UP\020\002\"$\n\rNetwork" - + "Config\022\023\n\006subnet\030\002 \001(\tB\003\340A\002\"Y\n\014AccessCon" - + "fig\022I\n\017network_configs\030\001 \003(\0132+.google.cl" - + "oud.managedkafka.v1.NetworkConfigB\003\340A\002\"\222" - + "\001\n\tGcpConfig\022F\n\raccess_config\030\003 \001(\0132*.go" - + "ogle.cloud.managedkafka.v1.AccessConfigB" - + "\003\340A\002\022=\n\007kms_key\030\002 \001(\tB,\340A\001\340A\005\372A#\n!cloudk" - + "ms.googleapis.com/CryptoKey\"\327\002\n\005Topic\022\021\n" - + "\004name\030\001 \001(\tB\003\340A\010\022\034\n\017partition_count\030\002 \001(" - + "\005B\003\340A\002\022\"\n\022replication_factor\030\003 \001(\005B\006\340A\002\340" - + "A\005\022F\n\007configs\030\004 \003(\01320.google.cloud.manag" - + "edkafka.v1.Topic.ConfigsEntryB\003\340A\001\032.\n\014Co" - + "nfigsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\002" - + "8\001:\200\001\352A}\n!managedkafka.googleapis.com/To" - + "pic\022Iprojects/{project}/locations/{locat" - + "ion}/clusters/{cluster}/topics/{topic}*\006" - + "topics2\005topic\"\341\001\n\025ConsumerTopicMetadata\022" - + "\\\n\npartitions\030\001 \003(\0132C.google.cloud.manag" - + "edkafka.v1.ConsumerTopicMetadata.Partiti" - + "onsEntryB\003\340A\001\032j\n\017PartitionsEntry\022\013\n\003key\030" - + "\001 \001(\005\022F\n\005value\030\002 \001(\01327.google.cloud.mana" - + "gedkafka.v1.ConsumerPartitionMetadata:\0028" - + "\001\"G\n\031ConsumerPartitionMetadata\022\023\n\006offset" - + "\030\001 \001(\003B\003\340A\002\022\025\n\010metadata\030\002 \001(\tB\003\340A\001\"\201\003\n\rC" - + "onsumerGroup\022\021\n\004name\030\001 \001(\tB\003\340A\010\022L\n\006topic" - + "s\030\002 \003(\01327.google.cloud.managedkafka.v1.C" - + "onsumerGroup.TopicsEntryB\003\340A\001\032b\n\013TopicsE" - + "ntry\022\013\n\003key\030\001 \001(\t\022B\n\005value\030\002 \001(\01323.googl" - + "e.cloud.managedkafka.v1.ConsumerTopicMet" - + "adata:\0028\001:\252\001\352A\246\001\n)managedkafka.googleapi" - + "s.com/ConsumerGroup\022Zprojects/{project}/" - + "locations/{location}/clusters/{cluster}/" - + "consumerGroups/{consumer_group}*\016consume" - + "rGroups2\rconsumerGroup\"\200\002\n\021OperationMeta" - + "data\0224\n\013create_time\030\001 \001(\0132\032.google.proto" - + 
"buf.TimestampB\003\340A\003\0221\n\010end_time\030\002 \001(\0132\032.g" - + "oogle.protobuf.TimestampB\003\340A\003\022\023\n\006target\030" - + "\003 \001(\tB\003\340A\003\022\021\n\004verb\030\004 \001(\tB\003\340A\003\022\033\n\016status_" - + "message\030\005 \001(\tB\003\340A\003\022#\n\026requested_cancella" - + "tion\030\006 \001(\010B\003\340A\003\022\030\n\013api_version\030\007 \001(\tB\003\340A" - + "\003B\303\003\n com.google.cloud.managedkafka.v1B\016" - + "ResourcesProtoP\001ZDcloud.google.com/go/ma" - + "nagedkafka/apiv1/managedkafkapb;managedk" - + "afkapb\252\002\034Google.Cloud.ManagedKafka.V1\312\002\034" - + "Google\\Cloud\\ManagedKafka\\V1\352\002\037Google::C" - + "loud::ManagedKafka::V1\352Ax\n!cloudkms.goog" - + "leapis.com/CryptoKey\022Sprojects/{project}" - + "/locations/{location}/keyRings/{key_ring" - + "}/cryptoKeys/{crypto_key}\352Ak\n*secretmana" - + "ger.googleapis.com/SecretVersion\022=projec" - + "ts/{project}/secrets/{secret}/versions/{" - + "secret_version}b\006proto3" + + "i/resource.proto\032\036google/protobuf/durati" + + "on.proto\032\037google/protobuf/timestamp.prot" + + "o\"\330\006\n\007Cluster\022B\n\ngcp_config\030\t \001(\0132\'.goog" + + "le.cloud.managedkafka.v1.GcpConfigB\003\340A\002H" + + "\000\022\021\n\004name\030\001 \001(\tB\003\340A\010\0224\n\013create_time\030\002 \001(" + + "\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n\013up" + + "date_time\030\003 \001(\0132\032.google.protobuf.Timest" + + "ampB\003\340A\003\022F\n\006labels\030\004 \003(\01321.google.cloud." + + "managedkafka.v1.Cluster.LabelsEntryB\003\340A\001" + + "\022J\n\017capacity_config\030\005 \001(\0132,.google.cloud" + + ".managedkafka.v1.CapacityConfigB\003\340A\002\022L\n\020" + + "rebalance_config\030\010 \001(\0132-.google.cloud.ma" + + "nagedkafka.v1.RebalanceConfigB\003\340A\001\022?\n\005st" + + "ate\030\n \001(\0162+.google.cloud.managedkafka.v1" + + ".Cluster.StateB\003\340A\003\022\037\n\rsatisfies_pzi\030\013 \001" + + "(\010B\003\340A\003H\001\210\001\001\022\037\n\rsatisfies_pzs\030\014 \001(\010B\003\340A\003" + + "H\002\210\001\001\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005val" + + "ue\030\002 \001(\t:\0028\001\"F\n\005State\022\025\n\021STATE_UNSPECIFI" + + "ED\020\000\022\014\n\010CREATING\020\001\022\n\n\006ACTIVE\020\002\022\014\n\010DELETI" + + "NG\020\003:w\352At\n#managedkafka.googleapis.com/C" + + "luster\022:projects/{project}/locations/{lo" + + "cation}/clusters/{cluster}*\010clusters2\007cl" + + "usterB\021\n\017platform_configB\020\n\016_satisfies_p" + + "ziB\020\n\016_satisfies_pzs\"D\n\016CapacityConfig\022\027" + + "\n\nvcpu_count\030\001 \001(\003B\003\340A\002\022\031\n\014memory_bytes\030" + + "\002 \001(\003B\003\340A\002\"\250\001\n\017RebalanceConfig\022E\n\004mode\030\001" + + " \001(\01622.google.cloud.managedkafka.v1.Reba" + + "lanceConfig.ModeB\003\340A\001\"N\n\004Mode\022\024\n\020MODE_UN" + + "SPECIFIED\020\000\022\020\n\014NO_REBALANCE\020\001\022\036\n\032AUTO_RE" + + "BALANCE_ON_SCALE_UP\020\002\"$\n\rNetworkConfig\022\023" + + "\n\006subnet\030\002 \001(\tB\003\340A\002\"Y\n\014AccessConfig\022I\n\017n" + + "etwork_configs\030\001 \003(\0132+.google.cloud.mana" + + "gedkafka.v1.NetworkConfigB\003\340A\002\"\222\001\n\tGcpCo" + + "nfig\022F\n\raccess_config\030\003 \001(\0132*.google.clo" + + "ud.managedkafka.v1.AccessConfigB\003\340A\002\022=\n\007" + + "kms_key\030\002 \001(\tB,\340A\001\340A\005\372A#\n!cloudkms.googl" + + 
"eapis.com/CryptoKey\"\327\002\n\005Topic\022\021\n\004name\030\001 " + + "\001(\tB\003\340A\010\022\034\n\017partition_count\030\002 \001(\005B\003\340A\002\022\"" + + "\n\022replication_factor\030\003 \001(\005B\006\340A\002\340A\005\022F\n\007co" + + "nfigs\030\004 \003(\01320.google.cloud.managedkafka." + + "v1.Topic.ConfigsEntryB\003\340A\001\032.\n\014ConfigsEnt" + + "ry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001:\200\001\352A}" + + "\n!managedkafka.googleapis.com/Topic\022Ipro" + + "jects/{project}/locations/{location}/clu" + + "sters/{cluster}/topics/{topic}*\006topics2\005" + + "topic\"\341\001\n\025ConsumerTopicMetadata\022\\\n\nparti" + + "tions\030\001 \003(\0132C.google.cloud.managedkafka." + + "v1.ConsumerTopicMetadata.PartitionsEntry" + + "B\003\340A\001\032j\n\017PartitionsEntry\022\013\n\003key\030\001 \001(\005\022F\n" + + "\005value\030\002 \001(\01327.google.cloud.managedkafka" + + ".v1.ConsumerPartitionMetadata:\0028\001\"G\n\031Con" + + "sumerPartitionMetadata\022\023\n\006offset\030\001 \001(\003B\003" + + "\340A\002\022\025\n\010metadata\030\002 \001(\tB\003\340A\001\"\201\003\n\rConsumerG" + + "roup\022\021\n\004name\030\001 \001(\tB\003\340A\010\022L\n\006topics\030\002 \003(\0132" + + "7.google.cloud.managedkafka.v1.ConsumerG" + + "roup.TopicsEntryB\003\340A\001\032b\n\013TopicsEntry\022\013\n\003" + + "key\030\001 \001(\t\022B\n\005value\030\002 \001(\01323.google.cloud." + + "managedkafka.v1.ConsumerTopicMetadata:\0028" + + "\001:\252\001\352A\246\001\n)managedkafka.googleapis.com/Co" + + "nsumerGroup\022Zprojects/{project}/location" + + "s/{location}/clusters/{cluster}/consumer" + + "Groups/{consumer_group}*\016consumerGroups2" + + "\rconsumerGroup\"\200\002\n\021OperationMetadata\0224\n\013" + + "create_time\030\001 \001(\0132\032.google.protobuf.Time" + + "stampB\003\340A\003\0221\n\010end_time\030\002 \001(\0132\032.google.pr" + + "otobuf.TimestampB\003\340A\003\022\023\n\006target\030\003 \001(\tB\003\340" + + "A\003\022\021\n\004verb\030\004 \001(\tB\003\340A\003\022\033\n\016status_message\030" + + "\005 \001(\tB\003\340A\003\022#\n\026requested_cancellation\030\006 \001" + + "(\010B\003\340A\003\022\030\n\013api_version\030\007 \001(\tB\003\340A\003\"\203\007\n\016Co" + + "nnectCluster\022I\n\ngcp_config\030\007 \001(\0132..googl" + + "e.cloud.managedkafka.v1.ConnectGcpConfig" + + "B\003\340A\002H\000\022\021\n\004name\030\001 \001(\tB\003\340A\010\022\035\n\rkafka_clus" + + "ter\030\002 \001(\tB\006\340A\002\340A\005\0224\n\013create_time\030\003 \001(\0132\032" + + ".google.protobuf.TimestampB\003\340A\003\0224\n\013updat" + + "e_time\030\004 \001(\0132\032.google.protobuf.Timestamp" + + "B\003\340A\003\022M\n\006labels\030\005 \003(\01328.google.cloud.man" + + "agedkafka.v1.ConnectCluster.LabelsEntryB" + + "\003\340A\001\022J\n\017capacity_config\030\006 \001(\0132,.google.c" + + "loud.managedkafka.v1.CapacityConfigB\003\340A\002" + + "\022F\n\005state\030\010 \001(\01622.google.cloud.managedka" + + "fka.v1.ConnectCluster.StateB\003\340A\003\022M\n\006conf" + + "ig\030\t \003(\01328.google.cloud.managedkafka.v1." 
+ + "ConnectCluster.ConfigEntryB\003\340A\001\032-\n\013Label" + + "sEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\032-" + + "\n\013ConfigEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(" + + "\t:\0028\001\"F\n\005State\022\025\n\021STATE_UNSPECIFIED\020\000\022\014\n" + + "\010CREATING\020\001\022\n\n\006ACTIVE\020\002\022\014\n\010DELETING\020\003:\234\001" + + "\352A\230\001\n*managedkafka.googleapis.com/Connec" + + "tCluster\022Iprojects/{project}/locations/{" + + "location}/connectClusters/{connect_clust" + + "er}*\017connectClusters2\016connectClusterB\021\n\017" + + "platform_config\"s\n\024ConnectNetworkConfig\022" + + "\033\n\016primary_subnet\030\003 \001(\tB\003\340A\002\022\037\n\022addition" + + "al_subnets\030\004 \003(\tB\003\340A\001\022\035\n\020dns_domain_name" + + "s\030\002 \003(\tB\003\340A\001\"g\n\023ConnectAccessConfig\022P\n\017n" + + "etwork_configs\030\001 \003(\01322.google.cloud.mana" + + "gedkafka.v1.ConnectNetworkConfigB\003\340A\002\"\253\001" + + "\n\020ConnectGcpConfig\022M\n\raccess_config\030\001 \001(" + + "\01321.google.cloud.managedkafka.v1.Connect" + + "AccessConfigB\003\340A\002\022H\n\014secret_paths\030\002 \003(\tB" + + "2\340A\001\372A,\n*secretmanager.googleapis.com/Se" + + "cretVersion\"\333\004\n\tConnector\022Q\n\023task_restar" + + "t_policy\030\004 \001(\0132-.google.cloud.managedkaf" + + "ka.v1.TaskRetryPolicyB\003\340A\001H\000\022\021\n\004name\030\001 \001" + + "(\tB\003\340A\010\022J\n\007configs\030\002 \003(\01324.google.cloud." + + "managedkafka.v1.Connector.ConfigsEntryB\003" + + "\340A\001\022A\n\005state\030\003 \001(\0162-.google.cloud.manage" + + "dkafka.v1.Connector.StateB\003\340A\003\032.\n\014Config" + + "sEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"p" + + "\n\005State\022\025\n\021STATE_UNSPECIFIED\020\000\022\016\n\nUNASSI" + + "GNED\020\001\022\013\n\007RUNNING\020\002\022\n\n\006PAUSED\020\003\022\n\n\006FAILE" + + "D\020\004\022\016\n\nRESTARTING\020\005\022\013\n\007STOPPED\020\006:\244\001\352A\240\001\n" + + "%managedkafka.googleapis.com/Connector\022`" + + "projects/{project}/locations/{location}/" + + "connectClusters/{connect_cluster}/connec" + + "tors/{connector}*\nconnectors2\tconnectorB" + + "\020\n\016restart_policy\"\203\001\n\017TaskRetryPolicy\0227\n" + + "\017minimum_backoff\030\001 \001(\0132\031.google.protobuf" + + ".DurationB\003\340A\001\0227\n\017maximum_backoff\030\002 \001(\0132" + + "\031.google.protobuf.DurationB\003\340A\001B\242\004\n com." 
+ + "google.cloud.managedkafka.v1B\016ResourcesP" + + "rotoP\001ZDcloud.google.com/go/managedkafka" + + "/apiv1/managedkafkapb;managedkafkapb\252\002\034G" + + "oogle.Cloud.ManagedKafka.V1\312\002\034Google\\Clo" + + "ud\\ManagedKafka\\V1\352\002\037Google::Cloud::Mana" + + "gedKafka::V1\352Ax\n!cloudkms.googleapis.com" + + "/CryptoKey\022Sprojects/{project}/locations" + + "/{location}/keyRings/{key_ring}/cryptoKe" + + "ys/{crypto_key}\352Ak\n*secretmanager.google" + + "apis.com/SecretVersion\022=projects/{projec" + + "t}/secrets/{secret}/versions/{secret_ver" + + "sion}\352A\\\n\037privateca.googleapis.com/CaPoo" + + "l\0229projects/{project}/locations/{locatio" + + "n}/caPools/{ca_pool}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -187,6 +277,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.Descriptors.FileDescriptor[] { com.google.api.FieldBehaviorProto.getDescriptor(), com.google.api.ResourceProto.getDescriptor(), + com.google.protobuf.DurationProto.getDescriptor(), com.google.protobuf.TimestampProto.getDescriptor(), }); internal_static_google_cloud_managedkafka_v1_Cluster_descriptor = @@ -329,6 +420,91 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "RequestedCancellation", "ApiVersion", }); + internal_static_google_cloud_managedkafka_v1_ConnectCluster_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_managedkafka_v1_ConnectCluster_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ConnectCluster_descriptor, + new java.lang.String[] { + "GcpConfig", + "Name", + "KafkaCluster", + "CreateTime", + "UpdateTime", + "Labels", + "CapacityConfig", + "State", + "Config", + "PlatformConfig", + }); + internal_static_google_cloud_managedkafka_v1_ConnectCluster_LabelsEntry_descriptor = + internal_static_google_cloud_managedkafka_v1_ConnectCluster_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_managedkafka_v1_ConnectCluster_LabelsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ConnectCluster_LabelsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_managedkafka_v1_ConnectCluster_ConfigEntry_descriptor = + internal_static_google_cloud_managedkafka_v1_ConnectCluster_descriptor + .getNestedTypes() + .get(1); + internal_static_google_cloud_managedkafka_v1_ConnectCluster_ConfigEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ConnectCluster_ConfigEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ConnectNetworkConfig_descriptor, + new java.lang.String[] { + "PrimarySubnet", "AdditionalSubnets", "DnsDomainNames", + }); + internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ConnectAccessConfig_descriptor, + new java.lang.String[] { + "NetworkConfigs", + }); + internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_ConnectGcpConfig_descriptor, + new java.lang.String[] { + "AccessConfig", "SecretPaths", + }); + internal_static_google_cloud_managedkafka_v1_Connector_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_cloud_managedkafka_v1_Connector_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_Connector_descriptor, + new java.lang.String[] { + "TaskRestartPolicy", "Name", "Configs", "State", "RestartPolicy", + }); + internal_static_google_cloud_managedkafka_v1_Connector_ConfigsEntry_descriptor = + internal_static_google_cloud_managedkafka_v1_Connector_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_managedkafka_v1_Connector_ConfigsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_Connector_ConfigsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_descriptor, + new java.lang.String[] { + "MinimumBackoff", "MaximumBackoff", + }); com.google.protobuf.ExtensionRegistry registry = com.google.protobuf.ExtensionRegistry.newInstance(); registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); @@ -339,6 +515,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { descriptor, registry); com.google.api.FieldBehaviorProto.getDescriptor(); com.google.api.ResourceProto.getDescriptor(); + com.google.protobuf.DurationProto.getDescriptor(); com.google.protobuf.TimestampProto.getDescriptor(); } diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorRequest.java new file mode 100644 index 000000000000..b5b1033da42d --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorRequest.java @@ -0,0 +1,654 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. 
DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.RestartConnectorRequest} + */ +public final class RestartConnectorRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.RestartConnectorRequest) + RestartConnectorRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use RestartConnectorRequest.newBuilder() to construct. + private RestartConnectorRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private RestartConnectorRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new RestartConnectorRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.RestartConnectorRequest.class, + com.google.cloud.managedkafka.v1.RestartConnectorRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + * + * + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + * + * + * + * + * @return The bytes for name. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.RestartConnectorRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.RestartConnectorRequest other = + (com.google.cloud.managedkafka.v1.RestartConnectorRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.RestartConnectorRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.RestartConnectorRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
<Builder>
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.RestartConnectorRequest) + com.google.cloud.managedkafka.v1.RestartConnectorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.RestartConnectorRequest.class, + com.google.cloud.managedkafka.v1.RestartConnectorRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.RestartConnectorRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.RestartConnectorRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.RestartConnectorRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.RestartConnectorRequest build() { + com.google.cloud.managedkafka.v1.RestartConnectorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.RestartConnectorRequest buildPartial() { + com.google.cloud.managedkafka.v1.RestartConnectorRequest result = + new com.google.cloud.managedkafka.v1.RestartConnectorRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.RestartConnectorRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.RestartConnectorRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.RestartConnectorRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.RestartConnectorRequest other) { + if (other == com.google.cloud.managedkafka.v1.RestartConnectorRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + * + * + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + * + * + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + * + * + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + * + * + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + * + * + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. 
+ */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.RestartConnectorRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.RestartConnectorRequest) + private static final com.google.cloud.managedkafka.v1.RestartConnectorRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.RestartConnectorRequest(); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser
<RestartConnectorRequest> PARSER =
+      new com.google.protobuf.AbstractParser<RestartConnectorRequest>() {
+        @java.lang.Override
+        public RestartConnectorRequest parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          Builder builder = newBuilder();
+          try {
+            builder.mergeFrom(input, extensionRegistry);
+          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+            throw e.setUnfinishedMessage(builder.buildPartial());
+          } catch (com.google.protobuf.UninitializedMessageException e) {
+            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
+          } catch (java.io.IOException e) {
+            throw new com.google.protobuf.InvalidProtocolBufferException(e)
+                .setUnfinishedMessage(builder.buildPartial());
+          }
+          return builder.buildPartial();
+        }
+      };
+
+  public static com.google.protobuf.Parser<RestartConnectorRequest> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<RestartConnectorRequest> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.managedkafka.v1.RestartConnectorRequest getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+}
diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorRequestOrBuilder.java
new file mode 100644
index 000000000000..834ff98bfdf0
--- /dev/null
+++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorRequestOrBuilder.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto
+
+// Protobuf Java Version: 3.25.5
+package com.google.cloud.managedkafka.v1;
+
+public interface RestartConnectorRequestOrBuilder
+    extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.RestartConnectorRequest)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   * <pre>
+   * Required. The name of the connector to restart.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+   *
+   * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+   *
+   * @return The name.
+   */
+  java.lang.String getName();
+  /**
+   * <pre>
+   * Required. The name of the connector to restart.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+   *
+   * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+   *
+   * @return The bytes for name.
+ */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorResponse.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorResponse.java new file mode 100644 index 000000000000..ad00cbf1b97e --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorResponse.java @@ -0,0 +1,433 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.RestartConnectorResponse} + */ +public final class RestartConnectorResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.RestartConnectorResponse) + RestartConnectorResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use RestartConnectorResponse.newBuilder() to construct. 
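
  // ---------------------------------------------------------------------------
  // Editor's sketch, not generated code: RestartConnectorResponse declares no
  // fields, so it acts as a pure acknowledgement of the restart call. Any two
  // instances compare equal, and the default instance encodes to zero bytes.
  private static void exampleEmptyResponseRoundTrip()
      throws com.google.protobuf.InvalidProtocolBufferException {
    RestartConnectorResponse original = RestartConnectorResponse.getDefaultInstance();
    byte[] wire = original.toByteArray(); // empty: there are no fields to write
    RestartConnectorResponse parsed = RestartConnectorResponse.parseFrom(wire);
    assert wire.length == 0 && original.equals(parsed);
  }
  // ---------------------------------------------------------------------------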
+ private RestartConnectorResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private RestartConnectorResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new RestartConnectorResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.RestartConnectorResponse.class, + com.google.cloud.managedkafka.v1.RestartConnectorResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.RestartConnectorResponse)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.RestartConnectorResponse other = + (com.google.cloud.managedkafka.v1.RestartConnectorResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.RestartConnectorResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.RestartConnectorResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
<Builder>
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.RestartConnectorResponse) + com.google.cloud.managedkafka.v1.RestartConnectorResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.RestartConnectorResponse.class, + com.google.cloud.managedkafka.v1.RestartConnectorResponse.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.RestartConnectorResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_RestartConnectorResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.RestartConnectorResponse getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.RestartConnectorResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.RestartConnectorResponse build() { + com.google.cloud.managedkafka.v1.RestartConnectorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.RestartConnectorResponse buildPartial() { + com.google.cloud.managedkafka.v1.RestartConnectorResponse result = + new com.google.cloud.managedkafka.v1.RestartConnectorResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.RestartConnectorResponse) { + return mergeFrom((com.google.cloud.managedkafka.v1.RestartConnectorResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(com.google.cloud.managedkafka.v1.RestartConnectorResponse other) { + if (other == com.google.cloud.managedkafka.v1.RestartConnectorResponse.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.RestartConnectorResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.RestartConnectorResponse) + private static final com.google.cloud.managedkafka.v1.RestartConnectorResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.RestartConnectorResponse(); + } + + public static com.google.cloud.managedkafka.v1.RestartConnectorResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RestartConnectorResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.RestartConnectorResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorResponseOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorResponseOrBuilder.java new 
file mode 100644 index 000000000000..99a4c74d3cd5 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/RestartConnectorResponseOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface RestartConnectorResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.RestartConnectorResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorRequest.java new file mode 100644 index 000000000000..9eebed61b12e --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorRequest.java @@ -0,0 +1,654 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.ResumeConnectorRequest} + */ +public final class ResumeConnectorRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ResumeConnectorRequest) + ResumeConnectorRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use ResumeConnectorRequest.newBuilder() to construct. 
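
  // ---------------------------------------------------------------------------
  // Editor's sketch, not generated code: building a ResumeConnectorRequest with
  // the resource-name format documented on the `name` field. The project,
  // location, cluster, and connector IDs below are placeholder assumptions.
  private static ResumeConnectorRequest exampleResumeRequest() {
    return ResumeConnectorRequest.newBuilder()
        .setName(
            "projects/my-project/locations/us-central1"
                + "/connectClusters/my-connect-cluster/connectors/my-connector")
        .build();
  }
  // ---------------------------------------------------------------------------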
+ private ResumeConnectorRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ResumeConnectorRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ResumeConnectorRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ResumeConnectorRequest.class, + com.google.cloud.managedkafka.v1.ResumeConnectorRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + * + * + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + * + * + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.ResumeConnectorRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.ResumeConnectorRequest other = + (com.google.cloud.managedkafka.v1.ResumeConnectorRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + 
return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder 
newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.ResumeConnectorRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.ResumeConnectorRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
<Builder>
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ResumeConnectorRequest) + com.google.cloud.managedkafka.v1.ResumeConnectorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ResumeConnectorRequest.class, + com.google.cloud.managedkafka.v1.ResumeConnectorRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.ResumeConnectorRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ResumeConnectorRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.ResumeConnectorRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ResumeConnectorRequest build() { + com.google.cloud.managedkafka.v1.ResumeConnectorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ResumeConnectorRequest buildPartial() { + com.google.cloud.managedkafka.v1.ResumeConnectorRequest result = + new com.google.cloud.managedkafka.v1.ResumeConnectorRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.ResumeConnectorRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message 
other) { + if (other instanceof com.google.cloud.managedkafka.v1.ResumeConnectorRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.ResumeConnectorRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.ResumeConnectorRequest other) { + if (other == com.google.cloud.managedkafka.v1.ResumeConnectorRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + * + * + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + * + * + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + * + * + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + * + * + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + * + * + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. 
+ */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ResumeConnectorRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ResumeConnectorRequest) + private static final com.google.cloud.managedkafka.v1.ResumeConnectorRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ResumeConnectorRequest(); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser
<ResumeConnectorRequest> PARSER =
+      new com.google.protobuf.AbstractParser<ResumeConnectorRequest>() {
+        @java.lang.Override
+        public ResumeConnectorRequest parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          Builder builder = newBuilder();
+          try {
+            builder.mergeFrom(input, extensionRegistry);
+          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+            throw e.setUnfinishedMessage(builder.buildPartial());
+          } catch (com.google.protobuf.UninitializedMessageException e) {
+            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
+          } catch (java.io.IOException e) {
+            throw new com.google.protobuf.InvalidProtocolBufferException(e)
+                .setUnfinishedMessage(builder.buildPartial());
+          }
+          return builder.buildPartial();
+        }
+      };
+
+  public static com.google.protobuf.Parser<ResumeConnectorRequest> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<ResumeConnectorRequest> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.managedkafka.v1.ResumeConnectorRequest getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+}
diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorRequestOrBuilder.java
new file mode 100644
index 000000000000..4eec3e89d658
--- /dev/null
+++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorRequestOrBuilder.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto
+
+// Protobuf Java Version: 3.25.5
+package com.google.cloud.managedkafka.v1;
+
+public interface ResumeConnectorRequestOrBuilder
+    extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ResumeConnectorRequest)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   * <pre>
+   * Required. The name of the connector to pause.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+   *
+   * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+   *
+   * @return The name.
+   */
+  java.lang.String getName();
+  /**
+   * <pre>
+   * Required. The name of the connector to pause.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+   *
+   * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+   *
+   * @return The bytes for name.
+ */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorResponse.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorResponse.java new file mode 100644 index 000000000000..6c85ad8e3e2b --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorResponse.java @@ -0,0 +1,433 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.ResumeConnectorResponse} + */ +public final class ResumeConnectorResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.ResumeConnectorResponse) + ResumeConnectorResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use ResumeConnectorResponse.newBuilder() to construct. 
+ private ResumeConnectorResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ResumeConnectorResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ResumeConnectorResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ResumeConnectorResponse.class, + com.google.cloud.managedkafka.v1.ResumeConnectorResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.ResumeConnectorResponse)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.ResumeConnectorResponse other = + (com.google.cloud.managedkafka.v1.ResumeConnectorResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.ResumeConnectorResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.ResumeConnectorResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+   * <pre>
+   * Required. The name of the connector to pause.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+     * <pre>
+     * Required. The name of the connector to pause.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+ * <pre>
+ * Response for ResumeConnector.
+ * </pre>
+   * <pre>
+   * Response for ResumeConnector.
+   * </pre>
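`ResumeConnectorResponse` declares no fields, so the default instance carries everything the RPC returns. A small sketch of the wire-format round trip, using only the `parseFrom` overloads and `getDefaultInstance()` shown in this file:

```java
import com.google.cloud.managedkafka.v1.ResumeConnectorResponse;
import com.google.protobuf.InvalidProtocolBufferException;

public class ResumeConnectorResponseExample {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    // With no declared fields, the default instance is the only meaningful value.
    ResumeConnectorResponse response = ResumeConnectorResponse.getDefaultInstance();

    // An empty message serializes to zero bytes and parses back to an equal instance.
    byte[] bytes = response.toByteArray();
    ResumeConnectorResponse parsed = ResumeConnectorResponse.parseFrom(bytes);

    System.out.println(bytes.length);            // 0
    System.out.println(parsed.equals(response)); // true
  }
}
```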
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.ResumeConnectorResponse) + com.google.cloud.managedkafka.v1.ResumeConnectorResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.ResumeConnectorResponse.class, + com.google.cloud.managedkafka.v1.ResumeConnectorResponse.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.ResumeConnectorResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_ResumeConnectorResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ResumeConnectorResponse getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.ResumeConnectorResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ResumeConnectorResponse build() { + com.google.cloud.managedkafka.v1.ResumeConnectorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ResumeConnectorResponse buildPartial() { + com.google.cloud.managedkafka.v1.ResumeConnectorResponse result = + new com.google.cloud.managedkafka.v1.ResumeConnectorResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.ResumeConnectorResponse) { + return mergeFrom((com.google.cloud.managedkafka.v1.ResumeConnectorResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(com.google.cloud.managedkafka.v1.ResumeConnectorResponse other) { + if (other == com.google.cloud.managedkafka.v1.ResumeConnectorResponse.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.ResumeConnectorResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.ResumeConnectorResponse) + private static final com.google.cloud.managedkafka.v1.ResumeConnectorResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.ResumeConnectorResponse(); + } + + public static com.google.cloud.managedkafka.v1.ResumeConnectorResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ResumeConnectorResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.ResumeConnectorResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorResponseOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorResponseOrBuilder.java new file mode 
100644 index 000000000000..021dc5a20894 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/ResumeConnectorResponseOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface ResumeConnectorResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.ResumeConnectorResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorRequest.java new file mode 100644 index 000000000000..7e33d2ca6413 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorRequest.java @@ -0,0 +1,654 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.StopConnectorRequest} + */ +public final class StopConnectorRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.StopConnectorRequest) + StopConnectorRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use StopConnectorRequest.newBuilder() to construct. 
+ private StopConnectorRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StopConnectorRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StopConnectorRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.StopConnectorRequest.class, + com.google.cloud.managedkafka.v1.StopConnectorRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + * + * + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + * + * + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.StopConnectorRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.StopConnectorRequest other = + (com.google.cloud.managedkafka.v1.StopConnectorRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; 
+ } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + 
public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.StopConnectorRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.StopConnectorRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * <pre>
+ * Request for StopConnector.
+ * </pre>
+   * <pre>
+   * Required. The name of the connector to stop.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+   * <pre>
+   * Required. The name of the connector to stop.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+   * <pre>
+   * Request for StopConnector.
+   * </pre>
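The `name` field must follow the resource pattern documented above. A sketch of building a `StopConnectorRequest`, assuming the generated `ConnectorName` resource-name helper from the same package formats that pattern (the helper and ID values here are illustrative assumptions):

```java
import com.google.cloud.managedkafka.v1.ConnectorName;
import com.google.cloud.managedkafka.v1.StopConnectorRequest;

public class StopConnectorRequestExample {
  public static void main(String[] args) {
    // ConnectorName is assumed to be the generated resource-name helper; the IDs are placeholders.
    ConnectorName connector =
        ConnectorName.of("my-project", "us-central1", "my-connect-cluster", "my-connector");

    StopConnectorRequest request =
        StopConnectorRequest.newBuilder().setName(connector.toString()).build();

    System.out.println(request.getName());
  }
}
```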
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.StopConnectorRequest) + com.google.cloud.managedkafka.v1.StopConnectorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.StopConnectorRequest.class, + com.google.cloud.managedkafka.v1.StopConnectorRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.StopConnectorRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.StopConnectorRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.StopConnectorRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.StopConnectorRequest build() { + com.google.cloud.managedkafka.v1.StopConnectorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.StopConnectorRequest buildPartial() { + com.google.cloud.managedkafka.v1.StopConnectorRequest result = + new com.google.cloud.managedkafka.v1.StopConnectorRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.StopConnectorRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.cloud.managedkafka.v1.StopConnectorRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.StopConnectorRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.StopConnectorRequest other) { + if (other == com.google.cloud.managedkafka.v1.StopConnectorRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + * + * + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + * + * + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + * + * + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + * + * + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + * + * + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. 
+ */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.StopConnectorRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.StopConnectorRequest) + private static final com.google.cloud.managedkafka.v1.StopConnectorRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.StopConnectorRequest(); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser
+     * <pre>
+     * Required. The name of the connector to stop.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+     * <pre>
+     * Required. The name of the connector to stop.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+     * <pre>
+     * Required. The name of the connector to stop.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+     * <pre>
+     * Required. The name of the connector to stop.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+     * <pre>
+     * Required. The name of the connector to stop.
+     * Structured like:
+     * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+     * </pre>
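The `OrBuilder` surface exposes the field both as a `String` (`getName`) and as a UTF-8 `ByteString` (`getNameBytes`). A quick sketch showing the two accessors agree:

```java
import com.google.cloud.managedkafka.v1.StopConnectorRequest;
import com.google.protobuf.ByteString;

public class NameBytesExample {
  public static void main(String[] args) {
    StopConnectorRequest request =
        StopConnectorRequest.newBuilder()
            .setName("projects/p/locations/l/connectClusters/c/connectors/x")
            .build();

    // getNameBytes() returns the same field value encoded as a UTF-8 ByteString.
    ByteString bytes = request.getNameBytes();
    System.out.println(bytes.toStringUtf8().equals(request.getName())); // true
  }
}
```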
+ * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StopConnectorRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.StopConnectorRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorRequestOrBuilder.java new file mode 100644 index 000000000000..a760309cfc40 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorRequestOrBuilder.java @@ -0,0 +1,59 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface StopConnectorRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.StopConnectorRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + * + * + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + * + * + * + * + * @return The bytes for name. 
+ */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorResponse.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorResponse.java new file mode 100644 index 000000000000..11387efe56b6 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorResponse.java @@ -0,0 +1,433 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.StopConnectorResponse} + */ +public final class StopConnectorResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.StopConnectorResponse) + StopConnectorResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use StopConnectorResponse.newBuilder() to construct. 
+ private StopConnectorResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StopConnectorResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StopConnectorResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.StopConnectorResponse.class, + com.google.cloud.managedkafka.v1.StopConnectorResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.StopConnectorResponse)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.StopConnectorResponse other = + (com.google.cloud.managedkafka.v1.StopConnectorResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom(byte[] data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.StopConnectorResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.StopConnectorResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+   * <pre>
+   * Required. The name of the connector to stop.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+   * <pre>
+   * Required. The name of the connector to stop.
+   * Structured like:
+   * projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+   * </pre>
+ * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
+ *
+ * <pre>
+ * Response for StopConnector.
+ * </pre>
+   * <pre>
+   * Response for StopConnector.
+   * </pre>
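An empty `StopConnectorResponse` is what the RPC returns on success. A hedged end-to-end sketch, assuming the generated `ManagedKafkaConnectClient` exposes a unary `stopConnector(StopConnectorRequest)` method and that Application Default Credentials are configured; the resource name is a placeholder:

```java
import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
import com.google.cloud.managedkafka.v1.StopConnectorRequest;
import com.google.cloud.managedkafka.v1.StopConnectorResponse;

public class StopConnectorExample {
  public static void main(String[] args) throws Exception {
    // try-with-resources closes the client's underlying channels on exit.
    try (ManagedKafkaConnectClient client = ManagedKafkaConnectClient.create()) {
      StopConnectorRequest request =
          StopConnectorRequest.newBuilder()
              .setName(
                  "projects/my-project/locations/us-central1/connectClusters/my-cluster/connectors/my-connector")
              .build();

      // stopConnector is assumed to be the generated unary method for this request type.
      StopConnectorResponse response = client.stopConnector(request);
      System.out.println(response);
    }
  }
}
```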
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.StopConnectorResponse) + com.google.cloud.managedkafka.v1.StopConnectorResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.StopConnectorResponse.class, + com.google.cloud.managedkafka.v1.StopConnectorResponse.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.StopConnectorResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_StopConnectorResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.StopConnectorResponse getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.StopConnectorResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.StopConnectorResponse build() { + com.google.cloud.managedkafka.v1.StopConnectorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.StopConnectorResponse buildPartial() { + com.google.cloud.managedkafka.v1.StopConnectorResponse result = + new com.google.cloud.managedkafka.v1.StopConnectorResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.StopConnectorResponse) { + return mergeFrom((com.google.cloud.managedkafka.v1.StopConnectorResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(com.google.cloud.managedkafka.v1.StopConnectorResponse other) { + if (other == com.google.cloud.managedkafka.v1.StopConnectorResponse.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.StopConnectorResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.StopConnectorResponse) + private static final com.google.cloud.managedkafka.v1.StopConnectorResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.StopConnectorResponse(); + } + + public static com.google.cloud.managedkafka.v1.StopConnectorResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StopConnectorResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.StopConnectorResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorResponseOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorResponseOrBuilder.java new file mode 100644 index 
000000000000..5d9fe0ddc8de --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/StopConnectorResponseOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface StopConnectorResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.StopConnectorResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/TaskRetryPolicy.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/TaskRetryPolicy.java new file mode 100644 index 000000000000..4db4b6950528 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/TaskRetryPolicy.java @@ -0,0 +1,1071 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.TaskRetryPolicy} + */ +public final class TaskRetryPolicy extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.TaskRetryPolicy) + TaskRetryPolicyOrBuilder { + private static final long serialVersionUID = 0L; + // Use TaskRetryPolicy.newBuilder() to construct. 
+ private TaskRetryPolicy(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TaskRetryPolicy() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TaskRetryPolicy(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.TaskRetryPolicy.class, + com.google.cloud.managedkafka.v1.TaskRetryPolicy.Builder.class); + } + + private int bitField0_; + public static final int MINIMUM_BACKOFF_FIELD_NUMBER = 1; + private com.google.protobuf.Duration minimumBackoff_; + /** + * + * + * + * + * + * + * @return Whether the minimumBackoff field is set. + */ + @java.lang.Override + public boolean hasMinimumBackoff() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + * + * + * + * + * @return The minimumBackoff. + */ + @java.lang.Override + public com.google.protobuf.Duration getMinimumBackoff() { + return minimumBackoff_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : minimumBackoff_; + } + /** + * + * + * + * + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getMinimumBackoffOrBuilder() { + return minimumBackoff_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : minimumBackoff_; + } + + public static final int MAXIMUM_BACKOFF_FIELD_NUMBER = 2; + private com.google.protobuf.Duration maximumBackoff_; + /** + * + * + * + * + * + * + * @return Whether the maximumBackoff field is set. + */ + @java.lang.Override + public boolean hasMaximumBackoff() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + * + * + * + * + * @return The maximumBackoff. + */ + @java.lang.Override + public com.google.protobuf.Duration getMaximumBackoff() { + return maximumBackoff_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : maximumBackoff_; + } + /** + * + * + * + * + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getMaximumBackoffOrBuilder() { + return maximumBackoff_ == null + ? 
com.google.protobuf.Duration.getDefaultInstance() + : maximumBackoff_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getMinimumBackoff()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getMaximumBackoff()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getMinimumBackoff()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getMaximumBackoff()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.TaskRetryPolicy)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.TaskRetryPolicy other = + (com.google.cloud.managedkafka.v1.TaskRetryPolicy) obj; + + if (hasMinimumBackoff() != other.hasMinimumBackoff()) return false; + if (hasMinimumBackoff()) { + if (!getMinimumBackoff().equals(other.getMinimumBackoff())) return false; + } + if (hasMaximumBackoff() != other.hasMaximumBackoff()) return false; + if (hasMaximumBackoff()) { + if (!getMaximumBackoff().equals(other.getMaximumBackoff())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasMinimumBackoff()) { + hash = (37 * hash) + MINIMUM_BACKOFF_FIELD_NUMBER; + hash = (53 * hash) + getMinimumBackoff().hashCode(); + } + if (hasMaximumBackoff()) { + hash = (37 * hash) + MAXIMUM_BACKOFF_FIELD_NUMBER; + hash = (53 * hash) + getMaximumBackoff().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.managedkafka.v1.TaskRetryPolicy prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.TaskRetryPolicy} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * <pre>
+ * Task Retry Policy is implemented on a best-effort
+ * basis.
+ * Retry delay will be exponential based on provided minimum and maximum
+ * backoffs. https://en.wikipedia.org/wiki/Exponential_backoff.
+ * Note that the delay between consecutive task restarts may not always
+ * precisely match the configured settings. This can happen when the
+ * ConnectCluster is in rebalancing state or if the ConnectCluster is
+ * unresponsive etc.
+ * </pre>
+ *
+   * <pre>
+   * Optional. The minimum amount of time to wait before retrying a failed task.
+   * This sets a lower bound for the backoff delay.
+   * </pre>
+   * <code>.google.protobuf.Duration minimum_backoff = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
+   *
+   * <pre>
+   * Optional. The maximum amount of time to wait before retrying a failed task.
+   * This sets an upper bound for the backoff delay.
+   * </pre>
+   * <code>.google.protobuf.Duration maximum_backoff = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
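The docs above describe an exponential backoff bounded by the two durations. A minimal sketch of how a caller might populate this message, assuming only the generated `setMinimumBackoff`/`setMaximumBackoff` setters visible in this diff and the standard `com.google.protobuf.Duration` builder:

```java
import com.google.cloud.managedkafka.v1.TaskRetryPolicy;
import com.google.protobuf.Duration;

public class TaskRetryPolicyExample {
  public static void main(String[] args) {
    // Retry delays grow exponentially, starting near the minimum backoff and
    // capped at the maximum backoff (both applied on a best-effort basis).
    TaskRetryPolicy policy =
        TaskRetryPolicy.newBuilder()
            .setMinimumBackoff(Duration.newBuilder().setSeconds(60).build())   // 1 minute
            .setMaximumBackoff(Duration.newBuilder().setSeconds(1800).build()) // 30 minutes
            .build();
    System.out.println(policy.hasMinimumBackoff()); // true
  }
}
```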
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.TaskRetryPolicy) + com.google.cloud.managedkafka.v1.TaskRetryPolicyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.TaskRetryPolicy.class, + com.google.cloud.managedkafka.v1.TaskRetryPolicy.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.TaskRetryPolicy.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getMinimumBackoffFieldBuilder(); + getMaximumBackoffFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + minimumBackoff_ = null; + if (minimumBackoffBuilder_ != null) { + minimumBackoffBuilder_.dispose(); + minimumBackoffBuilder_ = null; + } + maximumBackoff_ = null; + if (maximumBackoffBuilder_ != null) { + maximumBackoffBuilder_.dispose(); + maximumBackoffBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ResourcesProto + .internal_static_google_cloud_managedkafka_v1_TaskRetryPolicy_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.TaskRetryPolicy getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.TaskRetryPolicy.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.TaskRetryPolicy build() { + com.google.cloud.managedkafka.v1.TaskRetryPolicy result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.TaskRetryPolicy buildPartial() { + com.google.cloud.managedkafka.v1.TaskRetryPolicy result = + new com.google.cloud.managedkafka.v1.TaskRetryPolicy(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.TaskRetryPolicy result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.minimumBackoff_ = + minimumBackoffBuilder_ == null ? minimumBackoff_ : minimumBackoffBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.maximumBackoff_ = + maximumBackoffBuilder_ == null ? 
maximumBackoff_ : maximumBackoffBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.TaskRetryPolicy) { + return mergeFrom((com.google.cloud.managedkafka.v1.TaskRetryPolicy) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.TaskRetryPolicy other) { + if (other == com.google.cloud.managedkafka.v1.TaskRetryPolicy.getDefaultInstance()) + return this; + if (other.hasMinimumBackoff()) { + mergeMinimumBackoff(other.getMinimumBackoff()); + } + if (other.hasMaximumBackoff()) { + mergeMaximumBackoff(other.getMaximumBackoff()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getMinimumBackoffFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getMaximumBackoffFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Duration minimumBackoff_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + minimumBackoffBuilder_; + /** + * + * + * + * + * + * + * @return Whether the minimumBackoff field is set. + */ + public boolean hasMinimumBackoff() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + * + * + * + * + * @return The minimumBackoff. 
+ */ + public com.google.protobuf.Duration getMinimumBackoff() { + if (minimumBackoffBuilder_ == null) { + return minimumBackoff_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : minimumBackoff_; + } else { + return minimumBackoffBuilder_.getMessage(); + } + } + /** + * + * + * + * + * + */ + public Builder setMinimumBackoff(com.google.protobuf.Duration value) { + if (minimumBackoffBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + minimumBackoff_ = value; + } else { + minimumBackoffBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder setMinimumBackoff(com.google.protobuf.Duration.Builder builderForValue) { + if (minimumBackoffBuilder_ == null) { + minimumBackoff_ = builderForValue.build(); + } else { + minimumBackoffBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder mergeMinimumBackoff(com.google.protobuf.Duration value) { + if (minimumBackoffBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && minimumBackoff_ != null + && minimumBackoff_ != com.google.protobuf.Duration.getDefaultInstance()) { + getMinimumBackoffBuilder().mergeFrom(value); + } else { + minimumBackoff_ = value; + } + } else { + minimumBackoffBuilder_.mergeFrom(value); + } + if (minimumBackoff_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + /** + * + * + * + * + * + */ + public Builder clearMinimumBackoff() { + bitField0_ = (bitField0_ & ~0x00000001); + minimumBackoff_ = null; + if (minimumBackoffBuilder_ != null) { + minimumBackoffBuilder_.dispose(); + minimumBackoffBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public com.google.protobuf.Duration.Builder getMinimumBackoffBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getMinimumBackoffFieldBuilder().getBuilder(); + } + /** + * + * + * + * + * + */ + public com.google.protobuf.DurationOrBuilder getMinimumBackoffOrBuilder() { + if (minimumBackoffBuilder_ != null) { + return minimumBackoffBuilder_.getMessageOrBuilder(); + } else { + return minimumBackoff_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : minimumBackoff_; + } + } + /** + * + * + * + * + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + getMinimumBackoffFieldBuilder() { + if (minimumBackoffBuilder_ == null) { + minimumBackoffBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getMinimumBackoff(), getParentForChildren(), isClean()); + minimumBackoff_ = null; + } + return minimumBackoffBuilder_; + } + + private com.google.protobuf.Duration maximumBackoff_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + maximumBackoffBuilder_; + /** + * + * + * + * + * + * + * @return Whether the maximumBackoff field is set. + */ + public boolean hasMaximumBackoff() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + * + * + * + * + * @return The maximumBackoff. 
+ */ + public com.google.protobuf.Duration getMaximumBackoff() { + if (maximumBackoffBuilder_ == null) { + return maximumBackoff_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : maximumBackoff_; + } else { + return maximumBackoffBuilder_.getMessage(); + } + } + /** + * + * + * + * + * + */ + public Builder setMaximumBackoff(com.google.protobuf.Duration value) { + if (maximumBackoffBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + maximumBackoff_ = value; + } else { + maximumBackoffBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder setMaximumBackoff(com.google.protobuf.Duration.Builder builderForValue) { + if (maximumBackoffBuilder_ == null) { + maximumBackoff_ = builderForValue.build(); + } else { + maximumBackoffBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder mergeMaximumBackoff(com.google.protobuf.Duration value) { + if (maximumBackoffBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && maximumBackoff_ != null + && maximumBackoff_ != com.google.protobuf.Duration.getDefaultInstance()) { + getMaximumBackoffBuilder().mergeFrom(value); + } else { + maximumBackoff_ = value; + } + } else { + maximumBackoffBuilder_.mergeFrom(value); + } + if (maximumBackoff_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + /** + * + * + * + * + * + */ + public Builder clearMaximumBackoff() { + bitField0_ = (bitField0_ & ~0x00000002); + maximumBackoff_ = null; + if (maximumBackoffBuilder_ != null) { + maximumBackoffBuilder_.dispose(); + maximumBackoffBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public com.google.protobuf.Duration.Builder getMaximumBackoffBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getMaximumBackoffFieldBuilder().getBuilder(); + } + /** + * + * + * + * + * + */ + public com.google.protobuf.DurationOrBuilder getMaximumBackoffOrBuilder() { + if (maximumBackoffBuilder_ != null) { + return maximumBackoffBuilder_.getMessageOrBuilder(); + } else { + return maximumBackoff_ == null + ? 
com.google.protobuf.Duration.getDefaultInstance() + : maximumBackoff_; + } + } + /** + * + * + * + * + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + getMaximumBackoffFieldBuilder() { + if (maximumBackoffBuilder_ == null) { + maximumBackoffBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getMaximumBackoff(), getParentForChildren(), isClean()); + maximumBackoff_ = null; + } + return maximumBackoffBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.TaskRetryPolicy) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.TaskRetryPolicy) + private static final com.google.cloud.managedkafka.v1.TaskRetryPolicy DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.TaskRetryPolicy(); + } + + public static com.google.cloud.managedkafka.v1.TaskRetryPolicy getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser
+ * .google.protobuf.Duration maximum_backoff = 2 [(.google.api.field_behavior) = OPTIONAL]; + * PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TaskRetryPolicy parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.TaskRetryPolicy getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/TaskRetryPolicyOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/TaskRetryPolicyOrBuilder.java new file mode 100644 index 000000000000..2c3f6c455032 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/TaskRetryPolicyOrBuilder.java @@ -0,0 +1,108 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/resources.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface TaskRetryPolicyOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.TaskRetryPolicy) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + * + * + * + * + * @return Whether the minimumBackoff field is set. + */ + boolean hasMinimumBackoff(); + /** + * + * + * + * + * + * + * @return The minimumBackoff. + */ + com.google.protobuf.Duration getMinimumBackoff(); + /** + * + * + * + * + * + */ + com.google.protobuf.DurationOrBuilder getMinimumBackoffOrBuilder(); + + /** + * + * + * + * + * + * + * @return Whether the maximumBackoff field is set. + */ + boolean hasMaximumBackoff(); + /** + * + * + * + * + * + * + * @return The maximumBackoff. 
+ */ + com.google.protobuf.Duration getMaximumBackoff(); + /** + * + * + * + * + * + */ + com.google.protobuf.DurationOrBuilder getMaximumBackoffOrBuilder(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectClusterRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectClusterRequest.java new file mode 100644 index 000000000000..19f87f7e1def --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectClusterRequest.java @@ -0,0 +1,1377 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.UpdateConnectClusterRequest} + */ +public final class UpdateConnectClusterRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.UpdateConnectClusterRequest) + UpdateConnectClusterRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use UpdateConnectClusterRequest.newBuilder() to construct. + private UpdateConnectClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private UpdateConnectClusterRequest() { + requestId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new UpdateConnectClusterRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest.class, + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest.Builder.class); + } + + private int bitField0_; + public static final int UPDATE_MASK_FIELD_NUMBER = 1; + private com.google.protobuf.FieldMask updateMask_; + /** + * + * + * + * + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + * + * + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? 
com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + /** + * + * + * + * + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + public static final int CONNECT_CLUSTER_FIELD_NUMBER = 2; + private com.google.cloud.managedkafka.v1.ConnectCluster connectCluster_; + /** + * + * + * + * + * + * + * @return Whether the connectCluster field is set. + */ + @java.lang.Override + public boolean hasConnectCluster() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + * + * + * + * + * @return The connectCluster. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectCluster getConnectCluster() { + return connectCluster_ == null + ? com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance() + : connectCluster_; + } + /** + * + * + * + * + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder getConnectClusterOrBuilder() { + return connectCluster_ == null + ? com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance() + : connectCluster_; + } + + public static final int REQUEST_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestId_ = ""; + /** + * + * + * + * + * + * + * @return The requestId. + */ + @java.lang.Override + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + /** + * + * + * + * + * + * + * @return The bytes for requestId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getUpdateMask()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getConnectCluster()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(requestId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, requestId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getUpdateMask()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getConnectCluster()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(requestId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, requestId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest other = + (com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest) obj; + + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (hasConnectCluster() != other.hasConnectCluster()) return false; + if (hasConnectCluster()) { + if (!getConnectCluster().equals(other.getConnectCluster())) return false; + } + if (!getRequestId().equals(other.getRequestId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + if (hasConnectCluster()) { + hash = (37 * hash) + CONNECT_CLUSTER_FIELD_NUMBER; + hash = (53 * hash) + getConnectCluster().hashCode(); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } 
+ + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.UpdateConnectClusterRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * <pre>
+ * Request for UpdateConnectCluster.
+ * </pre>
+ *
+   * <pre>
+   * Required. Field mask is used to specify the fields to be overwritten in the
+   * cluster resource by the update. The fields specified in the update_mask are
+   * relative to the resource, not the full request. A field will be overwritten
+   * if it is in the mask. The mask is required and a value of * will update all
+   * fields.
+   * </pre>
+   * <code>.google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED];</code>
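As the mask semantics above note, paths are relative to the `ConnectCluster` resource, and a single `*` path overwrites every field. A minimal sketch using `com.google.protobuf.FieldMask`; the `labels` path is illustrative, not taken from this diff:

```java
import com.google.protobuf.FieldMask;

class UpdateMaskSketch {
  static FieldMask labelsOnly() {
    // Overwrite only the cluster's labels (hypothetical field path).
    return FieldMask.newBuilder().addPaths("labels").build();
  }

  static FieldMask everything() {
    // A single "*" path replaces every mutable field of the resource.
    return FieldMask.newBuilder().addPaths("*").build();
  }
}
```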
+   * <pre>
+   * Required. The Kafka Connect cluster to update. Its `name` field must be
+   * populated.
+   * </pre>
+   * <code>.google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 2 [(.google.api.field_behavior) = REQUIRED];</code>
+   * <pre>
+   * Optional. An optional request ID to identify requests. Specify a unique
+   * request ID to avoid duplication of requests. If a request times out or
+   * fails, retrying with the same ID allows the server to recognize the
+   * previous attempt. For at least 60 minutes, the server ignores duplicate
+   * requests bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and the
+   * request times out. If you make the request again with the same request ID
+   * within 60 minutes of the last request, the server checks if an original
+   * operation with the same request ID was received. If so, the server ignores
+   * the second request.
+   *
+   * The request ID must be a valid UUID. A zero UUID is not supported
+   * (00000000-0000-0000-0000-000000000000).
+   * </pre>
+   * <code>string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... }];</code>
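Putting the three fields together, a caller might assemble the request as sketched below. This assumes the generated builder follows the usual protobuf pattern (`setUpdateMask`, `setConnectCluster`, `setRequestId`, and `setName` on `ConnectCluster`); the resource name is a placeholder. Reusing the same UUID verbatim on a retry lets the server deduplicate the operation for at least 60 minutes, per the docs above.

```java
import com.google.cloud.managedkafka.v1.ConnectCluster;
import com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest;
import com.google.protobuf.FieldMask;
import java.util.UUID;

public class UpdateConnectClusterSketch {
  public static void main(String[] args) {
    UpdateConnectClusterRequest request =
        UpdateConnectClusterRequest.newBuilder()
            .setConnectCluster(
                ConnectCluster.newBuilder()
                    // The `name` field must be populated on update.
                    .setName("projects/my-project/locations/us-central1/connectClusters/my-cluster")
                    .build())
            // Overwrite only the fields named in the mask ("labels" is illustrative).
            .setUpdateMask(FieldMask.newBuilder().addPaths("labels").build())
            // A fresh, non-zero UUID; reuse it unchanged when retrying this call.
            .setRequestId(UUID.randomUUID().toString())
            .build();
    System.out.println(request.getRequestId());
  }
}
```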
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.UpdateConnectClusterRequest) + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest.class, + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getUpdateMaskFieldBuilder(); + getConnectClusterFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + connectCluster_ = null; + if (connectClusterBuilder_ != null) { + connectClusterBuilder_.dispose(); + connectClusterBuilder_ = null; + } + requestId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectClusterRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest + getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest build() { + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest buildPartial() { + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest result = + new com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.connectCluster_ = + connectClusterBuilder_ == null ? 
connectCluster_ : connectClusterBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.requestId_ = requestId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest other) { + if (other + == com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest.getDefaultInstance()) + return this; + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + if (other.hasConnectCluster()) { + mergeConnectCluster(other.getConnectCluster()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getConnectClusterFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + requestId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + 
com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + /** + * + * + * + * + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + * + * + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + /** + * + * + * + * + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + /** + * + * + * + * + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000001); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUpdateMaskFieldBuilder().getBuilder(); + } + /** + * + * + * + * + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + /** + * + * + * + * + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + getUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + private com.google.cloud.managedkafka.v1.ConnectCluster connectCluster_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectCluster, + com.google.cloud.managedkafka.v1.ConnectCluster.Builder, + com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder> + connectClusterBuilder_; + /** + * + * + * + * + * + * + * @return Whether the connectCluster field is set. 
+ */ + public boolean hasConnectCluster() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + * + * + * + * + * @return The connectCluster. + */ + public com.google.cloud.managedkafka.v1.ConnectCluster getConnectCluster() { + if (connectClusterBuilder_ == null) { + return connectCluster_ == null + ? com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance() + : connectCluster_; + } else { + return connectClusterBuilder_.getMessage(); + } + } + /** + * + * + * + * + * + */ + public Builder setConnectCluster(com.google.cloud.managedkafka.v1.ConnectCluster value) { + if (connectClusterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + connectCluster_ = value; + } else { + connectClusterBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder setConnectCluster( + com.google.cloud.managedkafka.v1.ConnectCluster.Builder builderForValue) { + if (connectClusterBuilder_ == null) { + connectCluster_ = builderForValue.build(); + } else { + connectClusterBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder mergeConnectCluster(com.google.cloud.managedkafka.v1.ConnectCluster value) { + if (connectClusterBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && connectCluster_ != null + && connectCluster_ + != com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance()) { + getConnectClusterBuilder().mergeFrom(value); + } else { + connectCluster_ = value; + } + } else { + connectClusterBuilder_.mergeFrom(value); + } + if (connectCluster_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + /** + * + * + * + * + * + */ + public Builder clearConnectCluster() { + bitField0_ = (bitField0_ & ~0x00000002); + connectCluster_ = null; + if (connectClusterBuilder_ != null) { + connectClusterBuilder_.dispose(); + connectClusterBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public com.google.cloud.managedkafka.v1.ConnectCluster.Builder getConnectClusterBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getConnectClusterFieldBuilder().getBuilder(); + } + /** + * + * + * + * + * + */ + public com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder getConnectClusterOrBuilder() { + if (connectClusterBuilder_ != null) { + return connectClusterBuilder_.getMessageOrBuilder(); + } else { + return connectCluster_ == null + ? com.google.cloud.managedkafka.v1.ConnectCluster.getDefaultInstance() + : connectCluster_; + } + } + /** + * + * + * + * + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectCluster, + com.google.cloud.managedkafka.v1.ConnectCluster.Builder, + com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder> + getConnectClusterFieldBuilder() { + if (connectClusterBuilder_ == null) { + connectClusterBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.ConnectCluster, + com.google.cloud.managedkafka.v1.ConnectCluster.Builder, + com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder>( + getConnectCluster(), getParentForChildren(), isClean()); + connectCluster_ = null; + } + return connectClusterBuilder_; + } + + private java.lang.Object requestId_ = ""; + /** + * + * + * + * + * + * + * @return The requestId. 
+ */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + * + * + * + * + * @return The bytes for requestId. + */ + public com.google.protobuf.ByteString getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + * + * + * + * + * @param value The requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + * + * + * + * + * @return This builder for chaining. + */ + public Builder clearRequestId() { + requestId_ = getDefaultInstance().getRequestId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * + * + * + * + * + * + * @param value The bytes for requestId to set. + * @return This builder for chaining. + */ + public Builder setRequestIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.UpdateConnectClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.UpdateConnectClusterRequest) + private static final com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest(); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser
+     * <pre>
+     * Required. Field mask is used to specify the fields to be overwritten in the
+     * cluster resource by the update. The fields specified in the update_mask are
+     * relative to the resource, not the full request. A field will be overwritten
+     * if it is in the mask. The mask is required and a value of * will update all
+     * fields.
+     * </pre>
+     *
+     * <code>.google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED];</code>
+     * <pre>
+     * Required. The Kafka Connect cluster to update. Its `name` field must be
+     * populated.
+     * </pre>
+     *
+     * <code>.google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 2 [(.google.api.field_behavior) = REQUIRED];</code>
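Taken together, the accessors above assemble the update request. The following is a minimal usage sketch, not taken from this change: the project and cluster names are placeholders, and the `putLabels` accessor on `ConnectCluster` is assumed from the usual protobuf codegen for a `labels` map field rather than shown in this excerpt.

```java
import com.google.cloud.managedkafka.v1.ConnectCluster;
import com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest;
import com.google.protobuf.FieldMask;
import java.util.UUID;

// Only the fields named in update_mask are overwritten on the server; paths
// are relative to the ConnectCluster resource, and "*" updates all fields.
UpdateConnectClusterRequest request =
    UpdateConnectClusterRequest.newBuilder()
        .setUpdateMask(FieldMask.newBuilder().addPaths("labels").build())
        .setConnectCluster(
            ConnectCluster.newBuilder()
                // The name field must be populated to address the cluster.
                .setName(
                    "projects/my-project/locations/us-central1/connectClusters/my-connect-cluster")
                .putLabels("env", "dev") // assumed generated accessor for the labels map
                .build())
        // See the request_id documentation below for retry semantics.
        .setRequestId(UUID.randomUUID().toString())
        .build();
```

Under the usual GAPIC pattern this request would then be passed to something like `ManagedKafkaConnectClient.updateConnectClusterAsync(request)`, which returns a long-running operation; that surface is an assumption based on generated-client conventions, not on this excerpt.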
+     * <pre>
+     * Optional. An optional request ID to identify requests. Specify a unique
+     * request ID to avoid duplication of requests. If a request times out or
+     * fails, retrying with the same ID allows the server to recognize the
+     * previous attempt. For at least 60 minutes, the server ignores duplicate
+     * requests bearing the same ID.
+     *
+     * For example, consider a situation where you make an initial request and the
+     * request times out. If you make the request again with the same request ID
+     * within 60 minutes of the last request, the server checks if an original
+     * operation with the same request ID was received. If so, the server ignores
+     * the second request.
+     *
+     * The request ID must be a valid UUID. A zero UUID is not supported
+     * (00000000-0000-0000-0000-000000000000).
+     * </pre>
+     *
+ * string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... } + * PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateConnectClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectClusterRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectClusterRequestOrBuilder.java new file mode 100644 index 000000000000..ad381db4d6fe --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectClusterRequestOrBuilder.java @@ -0,0 +1,175 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface UpdateConnectClusterRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.UpdateConnectClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + * + * + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + /** + * + * + * + * + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + /** + * + * + * + * + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); + + /** + * + * + * + * + * + * + * @return Whether the connectCluster field is set. + */ + boolean hasConnectCluster(); + /** + * + * + * + * + * + * + * @return The connectCluster. 
+ */ + com.google.cloud.managedkafka.v1.ConnectCluster getConnectCluster(); + /** + * + * + * + * + * + */ + com.google.cloud.managedkafka.v1.ConnectClusterOrBuilder getConnectClusterOrBuilder(); + + /** + * + * + * + * + * + * + * @return The requestId. + */ + java.lang.String getRequestId(); + /** + * + * + * + * + * + * + * @return The bytes for requestId. + */ + com.google.protobuf.ByteString getRequestIdBytes(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectorRequest.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectorRequest.java new file mode 100644 index 000000000000..1814202dedd7 --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectorRequest.java @@ -0,0 +1,1071 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +/** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.UpdateConnectorRequest} + */ +public final class UpdateConnectorRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.managedkafka.v1.UpdateConnectorRequest) + UpdateConnectorRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use UpdateConnectorRequest.newBuilder() to construct. + private UpdateConnectorRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private UpdateConnectorRequest() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new UpdateConnectorRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.UpdateConnectorRequest.class, + com.google.cloud.managedkafka.v1.UpdateConnectorRequest.Builder.class); + } + + private int bitField0_; + public static final int UPDATE_MASK_FIELD_NUMBER = 1; + private com.google.protobuf.FieldMask updateMask_; + /** + * + * + * + * + * + * + * @return Whether the updateMask field is set. 
+ */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + * + * + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + /** + * + * + * + * + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + public static final int CONNECTOR_FIELD_NUMBER = 2; + private com.google.cloud.managedkafka.v1.Connector connector_; + /** + * + * + * + * + * + * + * @return Whether the connector field is set. + */ + @java.lang.Override + public boolean hasConnector() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + * + * + * + * + * @return The connector. + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.Connector getConnector() { + return connector_ == null + ? com.google.cloud.managedkafka.v1.Connector.getDefaultInstance() + : connector_; + } + /** + * + * + * + * + * + */ + @java.lang.Override + public com.google.cloud.managedkafka.v1.ConnectorOrBuilder getConnectorOrBuilder() { + return connector_ == null + ? com.google.cloud.managedkafka.v1.Connector.getDefaultInstance() + : connector_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getUpdateMask()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getConnector()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getUpdateMask()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getConnector()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.managedkafka.v1.UpdateConnectorRequest)) { + return super.equals(obj); + } + com.google.cloud.managedkafka.v1.UpdateConnectorRequest other = + (com.google.cloud.managedkafka.v1.UpdateConnectorRequest) obj; + + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (hasConnector() != other.hasConnector()) return false; + if (hasConnector()) { + if (!getConnector().equals(other.getConnector())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; 
+ hash = (53 * hash) + getUpdateMask().hashCode(); + } + if (hasConnector()) { + hash = (37 * hash) + CONNECTOR_FIELD_NUMBER; + hash = (53 * hash) + getConnector().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder 
newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.managedkafka.v1.UpdateConnectorRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + * + * + * Protobuf type {@code google.cloud.managedkafka.v1.UpdateConnectorRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+   * <pre>
+   * Required. Field mask is used to specify the fields to be overwritten in the
+   * cluster resource by the update. The fields specified in the update_mask are
+   * relative to the resource, not the full request. A field will be overwritten
+   * if it is in the mask. The mask is required and a value of * will update all
+   * fields.
+   * </pre>
+   *
+   * <code>.google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED];</code>
+   * <pre>
+   * Required. The Kafka Connect cluster to update. Its `name` field must be
+   * populated.
+   * </pre>
+   *
+   * <code>.google.cloud.managedkafka.v1.ConnectCluster connect_cluster = 2 [(.google.api.field_behavior) = REQUIRED];</code>
+   * <pre>
+   * Optional. An optional request ID to identify requests. Specify a unique
+   * request ID to avoid duplication of requests. If a request times out or
+   * fails, retrying with the same ID allows the server to recognize the
+   * previous attempt. For at least 60 minutes, the server ignores duplicate
+   * requests bearing the same ID.
+   *
+   * For example, consider a situation where you make an initial request and the
+   * request times out. If you make the request again with the same request ID
+   * within 60 minutes of the last request, the server checks if an original
+   * operation with the same request ID was received. If so, the server ignores
+   * the second request.
+   *
+   * The request ID must be a valid UUID. A zero UUID is not supported
+   * (00000000-0000-0000-0000-000000000000).
+   * </pre>
+   *
+   * <code>string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { ... }</code>
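This contract is what makes retries idempotent: a retry must reuse the UUID from the first attempt rather than minting a new one per attempt. A short sketch of that pattern, with `updateMask` and `connectCluster` assumed to be built elsewhere:

```java
import java.util.UUID;

// Generate the ID once, outside the retry path, so every attempt carries the
// same value and the server can ignore duplicates for at least 60 minutes.
String requestId = UUID.randomUUID().toString(); // never the unsupported zero UUID

UpdateConnectClusterRequest request =
    UpdateConnectClusterRequest.newBuilder()
        .setUpdateMask(updateMask)         // assumed FieldMask built elsewhere
        .setConnectCluster(connectCluster) // assumed ConnectCluster built elsewhere
        .setRequestId(requestId)
        .build();

// On a timeout or transient failure, resend `request` unchanged: the server
// matches request_id to the original attempt and ignores the duplicate.
```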
+ * Request for UpdateConnector.
+ * 
+   * <pre>
+   * Required. Field mask is used to specify the fields to be overwritten in the
+   * cluster resource by the update. The fields specified in the update_mask are
+   * relative to the resource, not the full request. A field will be overwritten
+   * if it is in the mask. The mask is required and a value of * will update all
+   * fields.
+   * </pre>
+   *
+   * <code>.google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED];</code>
+   * <pre>
+   * Required. The connector to update. Its `name` field must be populated.
+   * </pre>
+   *
+   * <code>.google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED];</code>
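Unlike the cluster request, `UpdateConnectorRequest` carries only the mask and the connector itself; there is no request ID field. A minimal sketch with placeholder resource names, where the `putConfigs` accessor on `Connector` is assumed from the usual codegen for a `configs` map field rather than shown in this excerpt:

```java
import com.google.cloud.managedkafka.v1.Connector;
import com.google.cloud.managedkafka.v1.UpdateConnectorRequest;
import com.google.protobuf.FieldMask;

// The mask selects which connector fields the update overwrites.
UpdateConnectorRequest request =
    UpdateConnectorRequest.newBuilder()
        .setUpdateMask(FieldMask.newBuilder().addPaths("configs").build())
        .setConnector(
            Connector.newBuilder()
                // The name field must be populated to address the connector.
                .setName(
                    "projects/my-project/locations/us-central1/connectClusters/"
                        + "my-connect-cluster/connectors/my-connector")
                .putConfigs("tasks.max", "3") // assumed generated accessor for the configs map
                .build())
        .build();
```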
+   * Request for UpdateConnector.
+   * 
+ implements + // @@protoc_insertion_point(builder_implements:google.cloud.managedkafka.v1.UpdateConnectorRequest) + com.google.cloud.managedkafka.v1.UpdateConnectorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.managedkafka.v1.UpdateConnectorRequest.class, + com.google.cloud.managedkafka.v1.UpdateConnectorRequest.Builder.class); + } + + // Construct using com.google.cloud.managedkafka.v1.UpdateConnectorRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getUpdateMaskFieldBuilder(); + getConnectorFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + connector_ = null; + if (connectorBuilder_ != null) { + connectorBuilder_.dispose(); + connectorBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.managedkafka.v1.ManagedKafkaConnectProto + .internal_static_google_cloud_managedkafka_v1_UpdateConnectorRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.UpdateConnectorRequest getDefaultInstanceForType() { + return com.google.cloud.managedkafka.v1.UpdateConnectorRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.UpdateConnectorRequest build() { + com.google.cloud.managedkafka.v1.UpdateConnectorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.UpdateConnectorRequest buildPartial() { + com.google.cloud.managedkafka.v1.UpdateConnectorRequest result = + new com.google.cloud.managedkafka.v1.UpdateConnectorRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.cloud.managedkafka.v1.UpdateConnectorRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.connector_ = connectorBuilder_ == null ? 
connector_ : connectorBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.managedkafka.v1.UpdateConnectorRequest) { + return mergeFrom((com.google.cloud.managedkafka.v1.UpdateConnectorRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.managedkafka.v1.UpdateConnectorRequest other) { + if (other == com.google.cloud.managedkafka.v1.UpdateConnectorRequest.getDefaultInstance()) + return this; + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + if (other.hasConnector()) { + mergeConnector(other.getConnector()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getConnectorFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + /** + * + * + * + * + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + * + * + * + * + * @return The updateMask. 
+ */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + /** + * + * + * + * + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + /** + * + * + * + * + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000001); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUpdateMaskFieldBuilder().getBuilder(); + } + /** + * + * + * + * + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + /** + * + * + * + * + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + getUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + private com.google.cloud.managedkafka.v1.Connector connector_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.Connector, + com.google.cloud.managedkafka.v1.Connector.Builder, + com.google.cloud.managedkafka.v1.ConnectorOrBuilder> + connectorBuilder_; + /** + * + * + * + * + * + * + * @return Whether the connector field is set. + */ + public boolean hasConnector() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + * + * + * + * + * @return The connector. + */ + public com.google.cloud.managedkafka.v1.Connector getConnector() { + if (connectorBuilder_ == null) { + return connector_ == null + ? 
com.google.cloud.managedkafka.v1.Connector.getDefaultInstance() + : connector_; + } else { + return connectorBuilder_.getMessage(); + } + } + /** + * + * + * + * + * + */ + public Builder setConnector(com.google.cloud.managedkafka.v1.Connector value) { + if (connectorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + connector_ = value; + } else { + connectorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder setConnector( + com.google.cloud.managedkafka.v1.Connector.Builder builderForValue) { + if (connectorBuilder_ == null) { + connector_ = builderForValue.build(); + } else { + connectorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public Builder mergeConnector(com.google.cloud.managedkafka.v1.Connector value) { + if (connectorBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && connector_ != null + && connector_ != com.google.cloud.managedkafka.v1.Connector.getDefaultInstance()) { + getConnectorBuilder().mergeFrom(value); + } else { + connector_ = value; + } + } else { + connectorBuilder_.mergeFrom(value); + } + if (connector_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + /** + * + * + * + * + * + */ + public Builder clearConnector() { + bitField0_ = (bitField0_ & ~0x00000002); + connector_ = null; + if (connectorBuilder_ != null) { + connectorBuilder_.dispose(); + connectorBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + * + * + * + */ + public com.google.cloud.managedkafka.v1.Connector.Builder getConnectorBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getConnectorFieldBuilder().getBuilder(); + } + /** + * + * + * + * + * + */ + public com.google.cloud.managedkafka.v1.ConnectorOrBuilder getConnectorOrBuilder() { + if (connectorBuilder_ != null) { + return connectorBuilder_.getMessageOrBuilder(); + } else { + return connector_ == null + ? 
com.google.cloud.managedkafka.v1.Connector.getDefaultInstance() + : connector_; + } + } + /** + * + * + * + * + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.Connector, + com.google.cloud.managedkafka.v1.Connector.Builder, + com.google.cloud.managedkafka.v1.ConnectorOrBuilder> + getConnectorFieldBuilder() { + if (connectorBuilder_ == null) { + connectorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.managedkafka.v1.Connector, + com.google.cloud.managedkafka.v1.Connector.Builder, + com.google.cloud.managedkafka.v1.ConnectorOrBuilder>( + getConnector(), getParentForChildren(), isClean()); + connector_ = null; + } + return connectorBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.managedkafka.v1.UpdateConnectorRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.managedkafka.v1.UpdateConnectorRequest) + private static final com.google.cloud.managedkafka.v1.UpdateConnectorRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.managedkafka.v1.UpdateConnectorRequest(); + } + + public static com.google.cloud.managedkafka.v1.UpdateConnectorRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser
+     * <pre>
+     * Required. Field mask is used to specify the fields to be overwritten in the
+     * cluster resource by the update. The fields specified in the update_mask are
+     * relative to the resource, not the full request. A field will be overwritten
+     * if it is in the mask. The mask is required and a value of * will update all
+     * fields.
+     * </pre>
+     *
+     * <code>.google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED];</code>
+     * Required. The connector to update. Its `name` field must be populated.
+     * 
+ * .google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED]; + *
+     * Required. The connector to update. Its `name` field must be populated.
+     * 
+ * .google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED]; + *
+     * Required. The connector to update. Its `name` field must be populated.
+     * 
+ * .google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED]; + *
+     * Required. The connector to update. Its `name` field must be populated.
+     * 
+ * .google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED]; + *
+     * Required. The connector to update. Its `name` field must be populated.
+     * 
+ * .google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED]; + *
+     * Required. The connector to update. Its `name` field must be populated.
+     * 
+ * .google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED]; + *
+     * Required. The connector to update. Its `name` field must be populated.
+     * 
+ * .google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED]; + *
+     * Required. The connector to update. Its `name` field must be populated.
+     * 
+ * .google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED]; + *
+     * Required. The connector to update. Its `name` field must be populated.
+     * 
+ * .google.cloud.managedkafka.v1.Connector connector = 2 [(.google.api.field_behavior) = REQUIRED]; + * PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateConnectorRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.managedkafka.v1.UpdateConnectorRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectorRequestOrBuilder.java b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectorRequestOrBuilder.java new file mode 100644 index 000000000000..fdc3cbaa173f --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/java/com/google/cloud/managedkafka/v1/UpdateConnectorRequestOrBuilder.java @@ -0,0 +1,117 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/managedkafka/v1/managed_kafka_connect.proto + +// Protobuf Java Version: 3.25.5 +package com.google.cloud.managedkafka.v1; + +public interface UpdateConnectorRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.managedkafka.v1.UpdateConnectorRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + * + * + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + /** + * + * + * + * + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + /** + * + * + * + * + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); + + /** + * + * + * + * + * + * + * @return Whether the connector field is set. + */ + boolean hasConnector(); + /** + * + * + * + * + * + * + * @return The connector. 
+ */ + com.google.cloud.managedkafka.v1.Connector getConnector(); + /** + * + * + * + * + * + */ + com.google.cloud.managedkafka.v1.ConnectorOrBuilder getConnectorOrBuilder(); +} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/proto/google/cloud/managedkafka/v1/managed_kafka_connect.proto b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/proto/google/cloud/managedkafka/v1/managed_kafka_connect.proto new file mode 100644 index 000000000000..ede9dae5e2af --- /dev/null +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/proto/google/cloud/managedkafka/v1/managed_kafka_connect.proto @@ -0,0 +1,511 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.managedkafka.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/field_info.proto"; +import "google/api/resource.proto"; +import "google/cloud/managedkafka/v1/resources.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; + +option csharp_namespace = "Google.Cloud.ManagedKafka.V1"; +option go_package = "cloud.google.com/go/managedkafka/apiv1/managedkafkapb;managedkafkapb"; +option java_multiple_files = true; +option java_outer_classname = "ManagedKafkaConnectProto"; +option java_package = "com.google.cloud.managedkafka.v1"; +option php_namespace = "Google\\Cloud\\ManagedKafka\\V1"; +option ruby_package = "Google::Cloud::ManagedKafka::V1"; + +// The service that a client application uses to manage Apache Kafka Connect +// clusters and connectors. +service ManagedKafkaConnect { + option (google.api.default_host) = "managedkafka.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Lists the Kafka Connect clusters in a given project and location. + rpc ListConnectClusters(ListConnectClustersRequest) + returns (ListConnectClustersResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/locations/*}/connectClusters" + }; + option (google.api.method_signature) = "parent"; + } + + // Returns the properties of a single Kafka Connect cluster. + rpc GetConnectCluster(GetConnectClusterRequest) returns (ConnectCluster) { + option (google.api.http) = { + get: "/v1/{name=projects/*/locations/*/connectClusters/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Creates a new Kafka Connect cluster in a given project and location. 
+ rpc CreateConnectCluster(CreateConnectClusterRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/locations/*}/connectClusters" + body: "connect_cluster" + }; + option (google.api.method_signature) = + "parent,connect_cluster,connect_cluster_id"; + option (google.longrunning.operation_info) = { + response_type: "ConnectCluster" + metadata_type: "OperationMetadata" + }; + } + + // Updates the properties of a single Kafka Connect cluster. + rpc UpdateConnectCluster(UpdateConnectClusterRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1/{connect_cluster.name=projects/*/locations/*/connectClusters/*}" + body: "connect_cluster" + }; + option (google.api.method_signature) = "connect_cluster,update_mask"; + option (google.longrunning.operation_info) = { + response_type: "ConnectCluster" + metadata_type: "OperationMetadata" + }; + } + + // Deletes a single Connect cluster. + rpc DeleteConnectCluster(DeleteConnectClusterRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/locations/*/connectClusters/*}" + }; + option (google.api.method_signature) = "name"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "OperationMetadata" + }; + } + + // Lists the connectors in a given Connect cluster. + rpc ListConnectors(ListConnectorsRequest) returns (ListConnectorsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/locations/*/connectClusters/*}/connectors" + }; + option (google.api.method_signature) = "parent"; + } + + // Returns the properties of a single connector. + rpc GetConnector(GetConnectorRequest) returns (Connector) { + option (google.api.http) = { + get: "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Creates a new connector in a given Connect cluster. + rpc CreateConnector(CreateConnectorRequest) returns (Connector) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/locations/*/connectClusters/*}/connectors" + body: "connector" + }; + option (google.api.method_signature) = "parent,connector,connector_id"; + } + + // Updates the properties of a connector. + rpc UpdateConnector(UpdateConnectorRequest) returns (Connector) { + option (google.api.http) = { + patch: "/v1/{connector.name=projects/*/locations/*/connectClusters/*/connectors/*}" + body: "connector" + }; + option (google.api.method_signature) = "connector,update_mask"; + } + + // Deletes a connector. + rpc DeleteConnector(DeleteConnectorRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Pauses the connector and its tasks. + rpc PauseConnector(PauseConnectorRequest) returns (PauseConnectorResponse) { + option (google.api.http) = { + post: "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}:pause" + body: "*" + }; + option (google.api.method_signature) = "name"; + } + + // Resumes the connector and its tasks. + rpc ResumeConnector(ResumeConnectorRequest) + returns (ResumeConnectorResponse) { + option (google.api.http) = { + post: "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}:resume" + body: "*" + }; + option (google.api.method_signature) = "name"; + } + + // Restarts the connector. 
+ rpc RestartConnector(RestartConnectorRequest) + returns (RestartConnectorResponse) { + option (google.api.http) = { + post: "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}:restart" + body: "*" + }; + option (google.api.method_signature) = "name"; + } + + // Stops the connector. + rpc StopConnector(StopConnectorRequest) returns (StopConnectorResponse) { + option (google.api.http) = { + post: "/v1/{name=projects/*/locations/*/connectClusters/*/connectors/*}:stop" + body: "*" + }; + option (google.api.method_signature) = "name"; + } +} + +// Request for GetConnectCluster. +message GetConnectClusterRequest { + // Required. The name of the Kafka Connect cluster whose configuration to + // return. Structured like + // `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "managedkafka.googleapis.com/ConnectCluster" + } + ]; +} + +// Request for CreateConnectCluster. +message CreateConnectClusterRequest { + // Required. The parent project/location in which to create the Kafka Connect + // cluster. Structured like + // `projects/{project}/locations/{location}/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "managedkafka.googleapis.com/ConnectCluster" + } + ]; + + // Required. The ID to use for the Connect cluster, which will become the + // final component of the cluster's name. The ID must be 1-63 characters long, + // and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply + // with RFC 1035. + // + // This value is structured like: `my-cluster-id`. + string connect_cluster_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Configuration of the Kafka Connect cluster to create. Its `name` + // field is ignored. + ConnectCluster connect_cluster = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. An optional request ID to identify requests. Specify a unique + // request ID to avoid duplication of requests. If a request times out or + // fails, retrying with the same ID allows the server to recognize the + // previous attempt. For at least 60 minutes, the server ignores duplicate + // requests bearing the same ID. + // + // For example, consider a situation where you make an initial request and the + // request times out. If you make the request again with the same request ID + // within 60 minutes of the last request, the server checks if an original + // operation with the same request ID was received. If so, the server ignores + // the second request. + // + // The request ID must be a valid UUID. A zero UUID is not supported + // (00000000-0000-0000-0000-000000000000). + string request_id = 4 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request for UpdateConnectCluster. +message UpdateConnectClusterRequest { + // Required. Field mask is used to specify the fields to be overwritten in the + // cluster resource by the update. The fields specified in the update_mask are + // relative to the resource, not the full request. A field will be overwritten + // if it is in the mask. The mask is required and a value of * will update all + // fields. + google.protobuf.FieldMask update_mask = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Required. The Kafka Connect cluster to update. Its `name` field must be + // populated. 
+ ConnectCluster connect_cluster = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. An optional request ID to identify requests. Specify a unique + // request ID to avoid duplication of requests. If a request times out or + // fails, retrying with the same ID allows the server to recognize the + // previous attempt. For at least 60 minutes, the server ignores duplicate + // requests bearing the same ID. + // + // For example, consider a situation where you make an initial request and the + // request times out. If you make the request again with the same request ID + // within 60 minutes of the last request, the server checks if an original + // operation with the same request ID was received. If so, the server ignores + // the second request. + // + // The request ID must be a valid UUID. A zero UUID is not supported + // (00000000-0000-0000-0000-000000000000). + string request_id = 3 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request for DeleteConnectCluster. +message DeleteConnectClusterRequest { + // Required. The name of the Kafka Connect cluster to delete. + // Structured like + // `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "managedkafka.googleapis.com/ConnectCluster" + } + ]; + + // Optional. An optional request ID to identify requests. Specify a unique + // request ID to avoid duplication of requests. If a request times out or + // fails, retrying with the same ID allows the server to recognize the + // previous attempt. For at least 60 minutes, the server ignores duplicate + // requests bearing the same ID. + // + // For example, consider a situation where you make an initial request and the + // request times out. If you make the request again with the same request ID + // within 60 minutes of the last request, the server checks if an original + // operation with the same request ID was received. If so, the server ignores + // the second request. + // + // The request ID must be a valid UUID. A zero UUID is not supported + // (00000000-0000-0000-0000-000000000000). + string request_id = 2 [ + (google.api.field_info).format = UUID4, + (google.api.field_behavior) = OPTIONAL + ]; +} + +// Request for ListConnectClusters. +message ListConnectClustersRequest { + // Required. The parent project/location whose Connect clusters are to be + // listed. Structured like `projects/{project}/locations/{location}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "managedkafka.googleapis.com/ConnectCluster" + } + ]; + + // Optional. The maximum number of Connect clusters to return. The service may + // return fewer than this value. If unspecified, server will pick an + // appropriate default. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A page token, received from a previous `ListConnectClusters` + // call. Provide this to retrieve the subsequent page. + // + // When paginating, all other parameters provided to `ListConnectClusters` + // must match the call that provided the page token. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Filter expression for the result. + string filter = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Order by fields for the result. 
+ string order_by = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response for ListConnectClusters. +message ListConnectClustersResponse { + // The list of Connect clusters in the requested parent. + repeated ConnectCluster connect_clusters = 1; + + // A token that can be sent as `page_token` to retrieve the next page of + // results. If this field is omitted, there are no more results. + string next_page_token = 2; + + // Locations that could not be reached. + repeated string unreachable = 3; +} + +// Request for GetConnector. +message GetConnectorRequest { + // Required. The name of the connector whose configuration to return. + // Structured like: + // projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector} + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "managedkafka.googleapis.com/Connector" + } + ]; +} + +// Request for CreateConnector. +message CreateConnectorRequest { + // Required. The parent Connect cluster in which to create the connector. + // Structured like + // `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "managedkafka.googleapis.com/Connector" + } + ]; + + // Required. The ID to use for the connector, which will become the final + // component of the connector's name. The ID must be 1-63 characters long, and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with + // RFC 1035. + // + // This value is structured like: `my-connector-id`. + string connector_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The connector to create. + Connector connector = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Request for UpdateConnector. +message UpdateConnectorRequest { + // Required. Field mask is used to specify the fields to be overwritten in the + // cluster resource by the update. The fields specified in the update_mask are + // relative to the resource, not the full request. A field will be overwritten + // if it is in the mask. The mask is required and a value of * will update all + // fields. + google.protobuf.FieldMask update_mask = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Required. The connector to update. Its `name` field must be populated. + Connector connector = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request for DeleteConnector. +message DeleteConnectorRequest { + // Required. The name of the connector to delete. + // Structured like: + // projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector} + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "managedkafka.googleapis.com/Connector" + } + ]; +} + +// Request for ListConnectors. +message ListConnectorsRequest { + // Required. The parent Connect cluster whose connectors are to be listed. + // Structured like + // `projects/{project}/locations/{location}/connectClusters/{connect_cluster_id}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "managedkafka.googleapis.com/Connector" + } + ]; + + // Optional. The maximum number of connectors to return. The service may + // return fewer than this value. If unspecified, server will pick an + // appropriate default. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
A page token, received from a previous `ListConnectors` call.
+  // Provide this to retrieve the subsequent page.
+  //
+  // When paginating, all other parameters provided to `ListConnectors`
+  // must match the call that provided the page token.
+  string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Response for ListConnectors.
+message ListConnectorsResponse {
+  // The list of connectors in the requested parent.
+  repeated Connector connectors = 1;
+
+  // A token that can be sent as `page_token` to retrieve the next page of
+  // results. If this field is omitted, there are no more results.
+  string next_page_token = 2;
+}
+
+// Request for PauseConnector.
+message PauseConnectorRequest {
+  // Required. The name of the connector to pause.
+  // Structured like:
+  // projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "managedkafka.googleapis.com/Connector"
+    }
+  ];
+}
+
+// Response for PauseConnector.
+message PauseConnectorResponse {}
+
+// Request for ResumeConnector.
+message ResumeConnectorRequest {
+  // Required. The name of the connector to resume.
+  // Structured like:
+  // projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "managedkafka.googleapis.com/Connector"
+    }
+  ];
+}
+
+// Response for ResumeConnector.
+message ResumeConnectorResponse {}
+
+// Request for RestartConnector.
+message RestartConnectorRequest {
+  // Required. The name of the connector to restart.
+  // Structured like:
+  // projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "managedkafka.googleapis.com/Connector"
+    }
+  ];
+}
+
+// Response for RestartConnector.
+message RestartConnectorResponse {}
+
+// Request for StopConnector.
+message StopConnectorRequest {
+  // Required. The name of the connector to stop.
+  // Structured like:
+  // projects/{project}/locations/{location}/connectClusters/{connectCluster}/connectors/{connector}
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "managedkafka.googleapis.com/Connector"
+    }
+  ];
+}
+
+// Response for StopConnector.
+message StopConnectorResponse {} diff --git a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/proto/google/cloud/managedkafka/v1/resources.proto b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/proto/google/cloud/managedkafka/v1/resources.proto index e637f15f4563..fdc8fa4025aa 100644 --- a/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/proto/google/cloud/managedkafka/v1/resources.proto +++ b/java-managedkafka/proto-google-cloud-managedkafka-v1/src/main/proto/google/cloud/managedkafka/v1/resources.proto @@ -18,6 +18,7 @@ package google.cloud.managedkafka.v1; import "google/api/field_behavior.proto"; import "google/api/resource.proto"; +import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; option csharp_namespace = "Google.Cloud.ManagedKafka.V1"; @@ -35,6 +36,10 @@ option (google.api.resource_definition) = { type: "secretmanager.googleapis.com/SecretVersion" pattern: "projects/{project}/secrets/{secret}/versions/{secret_version}" }; +option (google.api.resource_definition) = { + type: "privateca.googleapis.com/CaPool" + pattern: "projects/{project}/locations/{location}/caPools/{ca_pool}" +}; // An Apache Kafka cluster deployed in a location. message Cluster { @@ -272,3 +277,205 @@ message OperationMetadata { // Output only. API version used to start the operation. string api_version = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; } + +// An Apache Kafka Connect cluster deployed in a location. +message ConnectCluster { + option (google.api.resource) = { + type: "managedkafka.googleapis.com/ConnectCluster" + pattern: "projects/{project}/locations/{location}/connectClusters/{connect_cluster}" + plural: "connectClusters" + singular: "connectCluster" + }; + + // The state of the cluster. + enum State { + // A state was not specified. + STATE_UNSPECIFIED = 0; + + // The cluster is being created. + CREATING = 1; + + // The cluster is active. + ACTIVE = 2; + + // The cluster is being deleted. + DELETING = 3; + } + + // Platform specific configuration properties for a Kafka Connect cluster. + oneof platform_config { + // Required. Configuration properties for a Kafka Connect cluster deployed + // to Google Cloud Platform. + ConnectGcpConfig gcp_config = 7 [(google.api.field_behavior) = REQUIRED]; + } + + // Identifier. The name of the Kafka Connect cluster. Structured like: + // projects/{project_number}/locations/{location}/connectClusters/{connect_cluster_id} + string name = 1 [(google.api.field_behavior) = IDENTIFIER]; + + // Required. Immutable. The name of the Kafka cluster this Kafka Connect + // cluster is attached to. Structured like: + // projects/{project}/locations/{location}/clusters/{cluster} + string kafka_cluster = 2 [ + (google.api.field_behavior) = REQUIRED, + (google.api.field_behavior) = IMMUTABLE + ]; + + // Output only. The time when the cluster was created. + google.protobuf.Timestamp create_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when the cluster was last updated. + google.protobuf.Timestamp update_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Labels as key value pairs. + map
<string, string> labels = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. Capacity configuration for the Kafka Connect cluster.
+  CapacityConfig capacity_config = 6 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The current state of the cluster.
+  State state = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Configurations for the worker that are overridden from the
+  // defaults. The key of the map is a Kafka Connect worker property name, for
+  // example: `exactly.once.source.support`.
+  map<string, string> config = 9 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The configuration of a Virtual Private Cloud (VPC) network that can access
+// the Kafka Connect cluster.
+message ConnectNetworkConfig {
+  // Required. VPC subnet to make available to the Kafka Connect cluster.
+  // Structured like:
+  // projects/{project}/regions/{region}/subnetworks/{subnet_id}
+  //
+  // It is used to create a Private Service Connect (PSC) interface for the
+  // Kafka Connect workers. It must be located in the same region as the
+  // Kafka Connect cluster.
+  //
+  // The CIDR range of the subnet must be within the IPv4 address ranges for
+  // private networks, as specified in RFC 1918. The primary subnet CIDR range
+  // must have a minimum size of /22 (1024 addresses).
+  string primary_subnet = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Additional subnets may be specified. They may be in another
+  // region, but must be in the same VPC network. The Connect workers can
+  // communicate with network endpoints in either the primary or additional
+  // subnets.
+  repeated string additional_subnets = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Additional DNS domain names from the subnet's network to be made
+  // visible to the Connect Cluster. When using MirrorMaker2, it's necessary to
+  // add the bootstrap address's dns domain name of the target cluster to make
+  // it visible to the connector. For example:
+  // my-kafka-cluster.us-central1.managedkafka.my-project.cloud.goog
+  repeated string dns_domain_names = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The configuration of access to the Kafka Connect cluster.
+message ConnectAccessConfig {
+  // Required.
+  // Virtual Private Cloud (VPC) networks that must be granted direct access to
+  // the Kafka Connect cluster. Minimum of 1 network is required. Maximum 10
+  // networks can be specified.
+  repeated ConnectNetworkConfig network_configs = 1
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+// Configuration properties for a Kafka Connect cluster deployed to Google Cloud
+// Platform.
+message ConnectGcpConfig {
+  // Required. Access configuration for the Kafka Connect cluster.
+  ConnectAccessConfig access_config = 1
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Secrets to load into workers. Exact SecretVersions from Secret
+  // Manager must be provided -- aliases are not supported. Up to 32 secrets may
+  // be loaded into one cluster. Format:
+  // projects/<project-id>/secrets/<secret-id>/versions/<version-id>
+  repeated string secret_paths = 2 [
+    (google.api.field_behavior) = OPTIONAL,
+    (google.api.resource_reference) = {
+      type: "secretmanager.googleapis.com/SecretVersion"
+    }
+  ];
+}
+
+// A Kafka Connect connector in a given ConnectCluster.
+message Connector {
+  option (google.api.resource) = {
+    type: "managedkafka.googleapis.com/Connector"
+    pattern: "projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}"
+    plural: "connectors"
+    singular: "connector"
+  };
+
+  // The state of the connector.
+  enum State {
+    // A state was not specified.
+    STATE_UNSPECIFIED = 0;
+
+    // The connector is not assigned to any tasks, usually transient.
+    UNASSIGNED = 1;
+
+    // The connector is running.
+    RUNNING = 2;
+
+    // The connector has been paused.
+    PAUSED = 3;
+
+    // The connector has failed. See logs for why.
+    FAILED = 4;
+
+    // The connector is restarting.
+    RESTARTING = 5;
+
+    // The connector has been stopped.
+    STOPPED = 6;
+  }
+
+  // A policy that specifies how to restart the failed connectors/tasks in a
+  // Cluster resource. If not set, the failed connectors/tasks won't be
+  // restarted.
+  oneof restart_policy {
+    // Optional. Restarts the individual tasks of a Connector.
+    TaskRetryPolicy task_restart_policy = 4
+        [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Identifier. The name of the connector.
+  // Structured like:
+  // projects/{project}/locations/{location}/connectClusters/{connect_cluster}/connectors/{connector}
+  string name = 1 [(google.api.field_behavior) = IDENTIFIER];
+
+  // Optional. Connector config as keys/values.
+  // The keys of the map are connector property names, for example:
+  // `connector.class`, `tasks.max`, `key.converter`.
+  map<string, string> configs = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The current state of the connector.
+  State state = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Task Retry Policy is implemented on a best-effort basis.
+// Retry delay will be exponential based on provided minimum and maximum
+// backoffs. https://en.wikipedia.org/wiki/Exponential_backoff.
+// Note that the delay between consecutive task restarts may not always
+// precisely match the configured settings. This can happen when the
+// ConnectCluster is in rebalancing state or if the ConnectCluster is
+// unresponsive etc.
+message TaskRetryPolicy {
+  // Optional. The minimum amount of time to wait before retrying a failed task.
+  // This sets a lower bound for the backoff delay.
+  google.protobuf.Duration minimum_backoff = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The maximum amount of time to wait before retrying a failed task.
+  // This sets an upper bound for the backoff delay.
+  google.protobuf.Duration maximum_backoff = 2
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/create/SyncCreateSetCredentialsProvider.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/create/SyncCreateSetCredentialsProvider.java
new file mode 100644
index 000000000000..db7983e704f7
--- /dev/null
+++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/create/SyncCreateSetCredentialsProvider.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.managedkafka.v1.samples;
+
+// [START managedkafka_v1_generated_ManagedKafkaConnect_Create_SetCredentialsProvider_sync]
+import com.google.api.gax.core.FixedCredentialsProvider;
+import com.google.auth.oauth2.GoogleCredentials;
+import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
+import com.google.cloud.managedkafka.v1.ManagedKafkaConnectSettings;
+
+public class SyncCreateSetCredentialsProvider {
+
+  public static void main(String[] args) throws Exception {
+    syncCreateSetCredentialsProvider();
+  }
+
+  public static void syncCreateSetCredentialsProvider() throws Exception {
+    // This snippet has been automatically generated and should be regarded as a code template only.
+    // It will require modifications to work:
+    // - It may require correct/in-range values for request initialization.
+    // - It may require specifying regional endpoints when creating the service client as shown in
+    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+    // Application Default Credentials are used here as one concrete way to obtain
+    // credentials; substitute any com.google.auth.Credentials instance as needed.
+    GoogleCredentials myCredentials = GoogleCredentials.getApplicationDefault();
+    ManagedKafkaConnectSettings managedKafkaConnectSettings =
+        ManagedKafkaConnectSettings.newBuilder()
+            .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+            .build();
+    ManagedKafkaConnectClient managedKafkaConnectClient =
+        ManagedKafkaConnectClient.create(managedKafkaConnectSettings);
+  }
+}
+// [END managedkafka_v1_generated_ManagedKafkaConnect_Create_SetCredentialsProvider_sync]
diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/create/SyncCreateSetEndpoint.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/create/SyncCreateSetEndpoint.java
new file mode 100644
index 000000000000..5b3cf95df43e
--- /dev/null
+++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/create/SyncCreateSetEndpoint.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.managedkafka.v1.samples;
+
+// [START managedkafka_v1_generated_ManagedKafkaConnect_Create_SetEndpoint_sync]
+import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
+import com.google.cloud.managedkafka.v1.ManagedKafkaConnectSettings;
+
+public class SyncCreateSetEndpoint {
+
+  public static void main(String[] args) throws Exception {
+    syncCreateSetEndpoint();
+  }
+
+  public static void syncCreateSetEndpoint() throws Exception {
+    // This snippet has been automatically generated and should be regarded as a code template only.
+    // It will require modifications to work:
+    // - It may require correct/in-range values for request initialization.
+    // - It may require specifying regional endpoints when creating the service client as shown in
+    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+    // The service's default endpoint is used here; replace it with a regional
+    // endpoint where required.
+    String myEndpoint = "managedkafka.googleapis.com:443";
+    ManagedKafkaConnectSettings managedKafkaConnectSettings =
+        ManagedKafkaConnectSettings.newBuilder().setEndpoint(myEndpoint).build();
+    ManagedKafkaConnectClient managedKafkaConnectClient =
+        ManagedKafkaConnectClient.create(managedKafkaConnectSettings);
+  }
+}
+// [END managedkafka_v1_generated_ManagedKafkaConnect_Create_SetEndpoint_sync]
diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/create/SyncCreateUseHttpJsonTransport.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/create/SyncCreateUseHttpJsonTransport.java
new file mode 100644
index 000000000000..e3bfbbccdc37
--- /dev/null
+++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/create/SyncCreateUseHttpJsonTransport.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.managedkafka.v1.samples;
+
+// [START managedkafka_v1_generated_ManagedKafkaConnect_Create_UseHttpJsonTransport_sync]
+import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
+import com.google.cloud.managedkafka.v1.ManagedKafkaConnectSettings;
+
+public class SyncCreateUseHttpJsonTransport {
+
+  public static void main(String[] args) throws Exception {
+    syncCreateUseHttpJsonTransport();
+  }
+
+  public static void syncCreateUseHttpJsonTransport() throws Exception {
+    // This snippet has been automatically generated and should be regarded as a code template only.
+    // It will require modifications to work:
+    // - It may require correct/in-range values for request initialization.
+    // - It may require specifying regional endpoints when creating the service client as shown in
+    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+    ManagedKafkaConnectSettings managedKafkaConnectSettings =
+        ManagedKafkaConnectSettings.newHttpJsonBuilder().build();
+    ManagedKafkaConnectClient managedKafkaConnectClient =
+        ManagedKafkaConnectClient.create(managedKafkaConnectSettings);
+  }
+}
+// [END managedkafka_v1_generated_ManagedKafkaConnect_Create_UseHttpJsonTransport_sync]
diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/AsyncCreateConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/AsyncCreateConnectCluster.java
new file mode 100644
index 000000000000..743fdedb6b38
--- /dev/null
+++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/AsyncCreateConnectCluster.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.managedkafka.v1.samples;
+
+// [START managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_async]
+import com.google.api.core.ApiFuture;
+import com.google.cloud.managedkafka.v1.ConnectCluster;
+import com.google.cloud.managedkafka.v1.CreateConnectClusterRequest;
+import com.google.cloud.managedkafka.v1.LocationName;
+import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
+import com.google.longrunning.Operation;
+
+public class AsyncCreateConnectCluster {
+
+  public static void main(String[] args) throws Exception {
+    asyncCreateConnectCluster();
+  }
+
+  public static void asyncCreateConnectCluster() throws Exception {
+    // This snippet has been automatically generated and should be regarded as a code template only.
+    // It will require modifications to work:
+    // - It may require correct/in-range values for request initialization.
+    // - It may require specifying regional endpoints when creating the service client as shown in
+    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+      CreateConnectClusterRequest request =
+          CreateConnectClusterRequest.newBuilder()
+              .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+              .setConnectClusterId("connectClusterId-1562078485")
+              .setConnectCluster(ConnectCluster.newBuilder().build())
+              .setRequestId("requestId693933066")
+              .build();
+      ApiFuture<Operation> future =
+          managedKafkaConnectClient.createConnectClusterCallable().futureCall(request);
+      // Do something.
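+      // Illustrative, non-normative sketch: the callable path hands back the raw
+      // long-running Operation, which can be re-fetched until it completes instead
+      // of only blocking on future.get(). getOperationsClient() is the standard
+      // GAPIC accessor for the LRO stub; verify it against the released client surface.
+      //   Operation op = future.get();
+      //   while (!op.getDone()) {
+      //     Thread.sleep(1000L); // fixed poll for brevity; production code should back off
+      //     op = managedKafkaConnectClient.getOperationsClient().getOperation(op.getName());
+      //   }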
+      Operation response = future.get();
+    }
+  }
+}
+// [END managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_async]
diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/AsyncCreateConnectClusterLRO.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/AsyncCreateConnectClusterLRO.java
new file mode 100644
index 000000000000..9fe0741306dc
--- /dev/null
+++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/AsyncCreateConnectClusterLRO.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.managedkafka.v1.samples;
+
+// [START managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_LRO_async]
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.managedkafka.v1.ConnectCluster;
+import com.google.cloud.managedkafka.v1.CreateConnectClusterRequest;
+import com.google.cloud.managedkafka.v1.LocationName;
+import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient;
+import com.google.cloud.managedkafka.v1.OperationMetadata;
+
+public class AsyncCreateConnectClusterLRO {
+
+  public static void main(String[] args) throws Exception {
+    asyncCreateConnectClusterLRO();
+  }
+
+  public static void asyncCreateConnectClusterLRO() throws Exception {
+    // This snippet has been automatically generated and should be regarded as a code template only.
+    // It will require modifications to work:
+    // - It may require correct/in-range values for request initialization.
+    // - It may require specifying regional endpoints when creating the service client as shown in
+    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) {
+      CreateConnectClusterRequest request =
+          CreateConnectClusterRequest.newBuilder()
+              .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString())
+              .setConnectClusterId("connectClusterId-1562078485")
+              .setConnectCluster(ConnectCluster.newBuilder().build())
+              .setRequestId("requestId693933066")
+              .build();
+      OperationFuture<ConnectCluster, OperationMetadata> future =
+          managedKafkaConnectClient.createConnectClusterOperationCallable().futureCall(request);
+      // Do something.
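+      // Illustrative, non-normative sketch: the gax OperationFuture also exposes
+      // the LRO's metadata, which for this API is OperationMetadata (already
+      // imported in this snippet):
+      //   OperationMetadata metadata = future.getMetadata().get();
+      //   System.out.println("operation metadata: " + metadata);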
+ ConnectCluster response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_LRO_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/SyncCreateConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/SyncCreateConnectCluster.java new file mode 100644 index 000000000000..5e42bd8ccc91 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/SyncCreateConnectCluster.java @@ -0,0 +1,49 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.CreateConnectClusterRequest; +import com.google.cloud.managedkafka.v1.LocationName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncCreateConnectCluster { + + public static void main(String[] args) throws Exception { + syncCreateConnectCluster(); + } + + public static void syncCreateConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + CreateConnectClusterRequest request = + CreateConnectClusterRequest.newBuilder() + .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString()) + .setConnectClusterId("connectClusterId-1562078485") + .setConnectCluster(ConnectCluster.newBuilder().build()) + .setRequestId("requestId693933066") + .build(); + ConnectCluster response = managedKafkaConnectClient.createConnectClusterAsync(request).get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/SyncCreateConnectClusterLocationnameConnectclusterString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/SyncCreateConnectClusterLocationnameConnectclusterString.java new file mode 100644 index 000000000000..7bbdfa1d2d96 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/SyncCreateConnectClusterLocationnameConnectclusterString.java @@ -0,0 +1,47 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_LocationnameConnectclusterString_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.LocationName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncCreateConnectClusterLocationnameConnectclusterString { + + public static void main(String[] args) throws Exception { + syncCreateConnectClusterLocationnameConnectclusterString(); + } + + public static void syncCreateConnectClusterLocationnameConnectclusterString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + ConnectCluster connectCluster = ConnectCluster.newBuilder().build(); + String connectClusterId = "connectClusterId-1562078485"; + ConnectCluster response = + managedKafkaConnectClient + .createConnectClusterAsync(parent, connectCluster, connectClusterId) + .get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_LocationnameConnectclusterString_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/SyncCreateConnectClusterStringConnectclusterString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/SyncCreateConnectClusterStringConnectclusterString.java new file mode 100644 index 000000000000..5cd71896b2f4 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnectcluster/SyncCreateConnectClusterStringConnectclusterString.java @@ -0,0 +1,47 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_StringConnectclusterString_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.LocationName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncCreateConnectClusterStringConnectclusterString { + + public static void main(String[] args) throws Exception { + syncCreateConnectClusterStringConnectclusterString(); + } + + public static void syncCreateConnectClusterStringConnectclusterString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String parent = LocationName.of("[PROJECT]", "[LOCATION]").toString(); + ConnectCluster connectCluster = ConnectCluster.newBuilder().build(); + String connectClusterId = "connectClusterId-1562078485"; + ConnectCluster response = + managedKafkaConnectClient + .createConnectClusterAsync(parent, connectCluster, connectClusterId) + .get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_CreateConnectCluster_StringConnectclusterString_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/AsyncCreateConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/AsyncCreateConnector.java new file mode 100644 index 000000000000..e66818ef17e2 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/AsyncCreateConnector.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_CreateConnector_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.CreateConnectorRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class AsyncCreateConnector { + + public static void main(String[] args) throws Exception { + asyncCreateConnector(); + } + + public static void asyncCreateConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + CreateConnectorRequest request = + CreateConnectorRequest.newBuilder() + .setParent( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setConnectorId("connectorId1724784200") + .setConnector(Connector.newBuilder().build()) + .build(); + ApiFuture<Connector> future = + managedKafkaConnectClient.createConnectorCallable().futureCall(request); + // Do something.
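+ // Editor's note (added commentary, not generator output): futureCall() returns immediately; + // the blocking get() below waits for the RPC to finish. A non-blocking alternative is to + // register a callback, e.g. com.google.api.core.ApiFutures.addCallback(future, callback, executor).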
+ Connector response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_CreateConnector_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/SyncCreateConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/SyncCreateConnector.java new file mode 100644 index 000000000000..0c24bb5e0dd4 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/SyncCreateConnector.java @@ -0,0 +1,49 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_CreateConnector_sync] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.CreateConnectorRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncCreateConnector { + + public static void main(String[] args) throws Exception { + syncCreateConnector(); + } + + public static void syncCreateConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + CreateConnectorRequest request = + CreateConnectorRequest.newBuilder() + .setParent( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setConnectorId("connectorId1724784200") + .setConnector(Connector.newBuilder().build()) + .build(); + Connector response = managedKafkaConnectClient.createConnector(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_CreateConnector_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/SyncCreateConnectorConnectclusternameConnectorString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/SyncCreateConnectorConnectclusternameConnectorString.java new file mode 100644 index 000000000000..1de2f895c7f3 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/SyncCreateConnectorConnectclusternameConnectorString.java @@ -0,0 +1,46 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_CreateConnector_ConnectclusternameConnectorString_sync] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncCreateConnectorConnectclusternameConnectorString { + + public static void main(String[] args) throws Exception { + syncCreateConnectorConnectclusternameConnectorString(); + } + + public static void syncCreateConnectorConnectclusternameConnectorString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectClusterName parent = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + Connector connector = Connector.newBuilder().build(); + String connectorId = "connectorId1724784200"; + Connector response = + managedKafkaConnectClient.createConnector(parent, connector, connectorId); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_CreateConnector_ConnectclusternameConnectorString_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/SyncCreateConnectorStringConnectorString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/SyncCreateConnectorStringConnectorString.java new file mode 100644 index 000000000000..6f661b367c4a --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/createconnector/SyncCreateConnectorStringConnectorString.java @@ -0,0 +1,46 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_CreateConnector_StringConnectorString_sync] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncCreateConnectorStringConnectorString { + + public static void main(String[] args) throws Exception { + syncCreateConnectorStringConnectorString(); + } + + public static void syncCreateConnectorStringConnectorString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String parent = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString(); + Connector connector = Connector.newBuilder().build(); + String connectorId = "connectorId1724784200"; + Connector response = + managedKafkaConnectClient.createConnector(parent, connector, connectorId); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_CreateConnector_StringConnectorString_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/AsyncDeleteConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/AsyncDeleteConnectCluster.java new file mode 100644 index 000000000000..ef28d983fc02 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/AsyncDeleteConnectCluster.java @@ -0,0 +1,52 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.longrunning.Operation; + +public class AsyncDeleteConnectCluster { + + public static void main(String[] args) throws Exception { + asyncDeleteConnectCluster(); + } + + public static void asyncDeleteConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + DeleteConnectClusterRequest request = + DeleteConnectClusterRequest.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setRequestId("requestId693933066") + .build(); + ApiFuture<Operation> future = + managedKafkaConnectClient.deleteConnectClusterCallable().futureCall(request); + // Do something.
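+ // Editor's note (added commentary, not generator output): deleteConnectClusterCallable() only + // starts the long-running operation, so the get() below yields the initial + // google.longrunning.Operation (here discarded) without waiting for the deletion to finish; + // the LRO variant in the next snippet polls the operation to completion.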
+ future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/AsyncDeleteConnectClusterLRO.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/AsyncDeleteConnectClusterLRO.java new file mode 100644 index 000000000000..bac3ec227a29 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/AsyncDeleteConnectClusterLRO.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_LRO_async] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.OperationMetadata; +import com.google.protobuf.Empty; + +public class AsyncDeleteConnectClusterLRO { + + public static void main(String[] args) throws Exception { + asyncDeleteConnectClusterLRO(); + } + + public static void asyncDeleteConnectClusterLRO() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + DeleteConnectClusterRequest request = + DeleteConnectClusterRequest.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setRequestId("requestId693933066") + .build(); + OperationFuture<Empty, OperationMetadata> future = + managedKafkaConnectClient.deleteConnectClusterOperationCallable().futureCall(request); + // Do something.
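+ // Editor's note (added commentary, not generator output): unlike the raw callable above, this + // OperationFuture tracks the long-running operation itself, so the get() below returns only + // once the connect cluster has actually been deleted (the response type is Empty).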
+ future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_LRO_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/SyncDeleteConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/SyncDeleteConnectCluster.java new file mode 100644 index 000000000000..548b6b1bb635 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/SyncDeleteConnectCluster.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_sync] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.DeleteConnectClusterRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.protobuf.Empty; + +public class SyncDeleteConnectCluster { + + public static void main(String[] args) throws Exception { + syncDeleteConnectCluster(); + } + + public static void syncDeleteConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + DeleteConnectClusterRequest request = + DeleteConnectClusterRequest.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setRequestId("requestId693933066") + .build(); + managedKafkaConnectClient.deleteConnectClusterAsync(request).get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/SyncDeleteConnectClusterConnectclustername.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/SyncDeleteConnectClusterConnectclustername.java new file mode 100644 index 000000000000..a8a419a5dd86 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/SyncDeleteConnectClusterConnectclustername.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_Connectclustername_sync] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.protobuf.Empty; + +public class SyncDeleteConnectClusterConnectclustername { + + public static void main(String[] args) throws Exception { + syncDeleteConnectClusterConnectclustername(); + } + + public static void syncDeleteConnectClusterConnectclustername() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectClusterName name = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + managedKafkaConnectClient.deleteConnectClusterAsync(name).get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_Connectclustername_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/SyncDeleteConnectClusterString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/SyncDeleteConnectClusterString.java new file mode 100644 index 000000000000..8d5fee520b07 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnectcluster/SyncDeleteConnectClusterString.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_String_sync] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.protobuf.Empty; + +public class SyncDeleteConnectClusterString { + + public static void main(String[] args) throws Exception { + syncDeleteConnectClusterString(); + } + + public static void syncDeleteConnectClusterString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String name = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString(); + managedKafkaConnectClient.deleteConnectClusterAsync(name).get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnectCluster_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/AsyncDeleteConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/AsyncDeleteConnector.java new file mode 100644 index 000000000000..7b0aa80726d3 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/AsyncDeleteConnector.java @@ -0,0 +1,52 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnector_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.DeleteConnectorRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.protobuf.Empty; + +public class AsyncDeleteConnector { + + public static void main(String[] args) throws Exception { + asyncDeleteConnector(); + } + + public static void asyncDeleteConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + DeleteConnectorRequest request = + DeleteConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + ApiFuture<Empty> future = + managedKafkaConnectClient.deleteConnectorCallable().futureCall(request); + // Do something. + future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnector_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/SyncDeleteConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/SyncDeleteConnector.java new file mode 100644 index 000000000000..c99000325de7 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/SyncDeleteConnector.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnector_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.DeleteConnectorRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.protobuf.Empty; + +public class SyncDeleteConnector { + + public static void main(String[] args) throws Exception { + syncDeleteConnector(); + } + + public static void syncDeleteConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization.
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + DeleteConnectorRequest request = + DeleteConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + managedKafkaConnectClient.deleteConnector(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnector_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/SyncDeleteConnectorConnectorname.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/SyncDeleteConnectorConnectorname.java new file mode 100644 index 000000000000..f548cae950bc --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/SyncDeleteConnectorConnectorname.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnector_Connectorname_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.protobuf.Empty; + +public class SyncDeleteConnectorConnectorname { + + public static void main(String[] args) throws Exception { + syncDeleteConnectorConnectorname(); + } + + public static void syncDeleteConnectorConnectorname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + managedKafkaConnectClient.deleteConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnector_Connectorname_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/SyncDeleteConnectorString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/SyncDeleteConnectorString.java new file mode 100644 index 000000000000..ea3bf81d41a9 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/deleteconnector/SyncDeleteConnectorString.java @@ -0,0 +1,44 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnector_String_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.protobuf.Empty; + +public class SyncDeleteConnectorString { + + public static void main(String[] args) throws Exception { + syncDeleteConnectorString(); + } + + public static void syncDeleteConnectorString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString(); + managedKafkaConnectClient.deleteConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_DeleteConnector_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/AsyncGetConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/AsyncGetConnectCluster.java new file mode 100644 index 000000000000..07e7e8db650f --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/AsyncGetConnectCluster.java @@ -0,0 +1,51 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetConnectCluster_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.GetConnectClusterRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class AsyncGetConnectCluster { + + public static void main(String[] args) throws Exception { + asyncGetConnectCluster(); + } + + public static void asyncGetConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + GetConnectClusterRequest request = + GetConnectClusterRequest.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .build(); + ApiFuture<ConnectCluster> future = + managedKafkaConnectClient.getConnectClusterCallable().futureCall(request); + // Do something.
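+ // Editor's note (added commentary, not generator output): get() waits indefinitely; because + // ApiFuture extends java.util.concurrent.Future, a bounded wait such as + // future.get(30, java.util.concurrent.TimeUnit.SECONDS) is also possible (the 30-second + // value is illustrative only).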
+ ConnectCluster response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetConnectCluster_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/SyncGetConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/SyncGetConnectCluster.java new file mode 100644 index 000000000000..376fb859824c --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/SyncGetConnectCluster.java @@ -0,0 +1,47 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetConnectCluster_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.GetConnectClusterRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncGetConnectCluster { + + public static void main(String[] args) throws Exception { + syncGetConnectCluster(); + } + + public static void syncGetConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + GetConnectClusterRequest request = + GetConnectClusterRequest.newBuilder() + .setName( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .build(); + ConnectCluster response = managedKafkaConnectClient.getConnectCluster(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetConnectCluster_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/SyncGetConnectClusterConnectclustername.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/SyncGetConnectClusterConnectclustername.java new file mode 100644 index 000000000000..647871113de7 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/SyncGetConnectClusterConnectclustername.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetConnectCluster_Connectclustername_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncGetConnectClusterConnectclustername { + + public static void main(String[] args) throws Exception { + syncGetConnectClusterConnectclustername(); + } + + public static void syncGetConnectClusterConnectclustername() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectClusterName name = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + ConnectCluster response = managedKafkaConnectClient.getConnectCluster(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetConnectCluster_Connectclustername_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/SyncGetConnectClusterString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/SyncGetConnectClusterString.java new file mode 100644 index 000000000000..14df1c1c5fad --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnectcluster/SyncGetConnectClusterString.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetConnectCluster_String_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncGetConnectClusterString { + + public static void main(String[] args) throws Exception { + syncGetConnectClusterString(); + } + + public static void syncGetConnectClusterString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String name = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString(); + ConnectCluster response = managedKafkaConnectClient.getConnectCluster(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetConnectCluster_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/AsyncGetConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/AsyncGetConnector.java new file mode 100644 index 000000000000..cff7289f4b94 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/AsyncGetConnector.java @@ -0,0 +1,52 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetConnector_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.GetConnectorRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class AsyncGetConnector { + + public static void main(String[] args) throws Exception { + asyncGetConnector(); + } + + public static void asyncGetConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + GetConnectorRequest request = + GetConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + ApiFuture<Connector> future = + managedKafkaConnectClient.getConnectorCallable().futureCall(request); + // Do something. + Connector response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetConnector_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/SyncGetConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/SyncGetConnector.java new file mode 100644 index 000000000000..08dc3814a165 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/SyncGetConnector.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetConnector_sync] +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.GetConnectorRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncGetConnector { + + public static void main(String[] args) throws Exception { + syncGetConnector(); + } + + public static void syncGetConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization.
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + GetConnectorRequest request = + GetConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + Connector response = managedKafkaConnectClient.getConnector(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetConnector_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/SyncGetConnectorConnectorname.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/SyncGetConnectorConnectorname.java new file mode 100644 index 000000000000..9c085a0a6da1 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/SyncGetConnectorConnectorname.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetConnector_Connectorname_sync] +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncGetConnectorConnectorname { + + public static void main(String[] args) throws Exception { + syncGetConnectorConnectorname(); + } + + public static void syncGetConnectorConnectorname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + Connector response = managedKafkaConnectClient.getConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetConnector_Connectorname_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/SyncGetConnectorString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/SyncGetConnectorString.java new file mode 100644 index 000000000000..30fa749524b5 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getconnector/SyncGetConnectorString.java @@ -0,0 +1,44 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetConnector_String_sync] +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncGetConnectorString { + + public static void main(String[] args) throws Exception { + syncGetConnectorString(); + } + + public static void syncGetConnectorString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString(); + Connector response = managedKafkaConnectClient.getConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetConnector_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getlocation/AsyncGetLocation.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getlocation/AsyncGetLocation.java new file mode 100644 index 000000000000..9faae43fd9c8 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getlocation/AsyncGetLocation.java @@ -0,0 +1,46 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetLocation_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.location.GetLocationRequest; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class AsyncGetLocation { + + public static void main(String[] args) throws Exception { + asyncGetLocation(); + } + + public static void asyncGetLocation() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + GetLocationRequest request = GetLocationRequest.newBuilder().setName("name3373707").build(); + ApiFuture<Location> future = + managedKafkaConnectClient.getLocationCallable().futureCall(request); + // Do something.
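+ // Editor's note (added commentary, not generator output): GetLocation is part of the + // com.google.cloud.location mixin surfaced on this client; it behaves like any other unary + // call, so the same blocking get() pattern shown in the earlier snippets applies here.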
+ Location response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetLocation_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getlocation/SyncGetLocation.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getlocation/SyncGetLocation.java new file mode 100644 index 000000000000..769bac72c52a --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/getlocation/SyncGetLocation.java @@ -0,0 +1,42 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_GetLocation_sync] +import com.google.cloud.location.GetLocationRequest; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncGetLocation { + + public static void main(String[] args) throws Exception { + syncGetLocation(); + } + + public static void syncGetLocation() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + GetLocationRequest request = GetLocationRequest.newBuilder().setName("name3373707").build(); + Location response = managedKafkaConnectClient.getLocation(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_GetLocation_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/AsyncListConnectClusters.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/AsyncListConnectClusters.java new file mode 100644 index 000000000000..1efa2e6fc488 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/AsyncListConnectClusters.java @@ -0,0 +1,56 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ListConnectClustersRequest; +import com.google.cloud.managedkafka.v1.LocationName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class AsyncListConnectClusters { + +  public static void main(String[] args) throws Exception { +    asyncListConnectClusters(); +  } + +  public static void asyncListConnectClusters() throws Exception { +    // This snippet has been automatically generated and should be regarded as a code template only. +    // It will require modifications to work: +    // - It may require correct/in-range values for request initialization. +    // - It may require specifying regional endpoints when creating the service client as shown in +    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library +    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { +      ListConnectClustersRequest request = +          ListConnectClustersRequest.newBuilder() +              .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString()) +              .setPageSize(883849137) +              .setPageToken("pageToken873572522") +              .setFilter("filter-1274492040") +              .setOrderBy("orderBy-1207110587") +              .build(); +      ApiFuture<ConnectCluster> future = +          managedKafkaConnectClient.listConnectClustersPagedCallable().futureCall(request); +      // Do something. +      for (ConnectCluster element : future.get().iterateAll()) { +        // doThingsWith(element); +      } +    } +  } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/AsyncListConnectClustersPaged.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/AsyncListConnectClustersPaged.java new file mode 100644 index 000000000000..b09393bb5995 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/AsyncListConnectClustersPaged.java @@ -0,0 +1,64 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_Paged_async] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ListConnectClustersRequest; +import com.google.cloud.managedkafka.v1.ListConnectClustersResponse; +import com.google.cloud.managedkafka.v1.LocationName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.common.base.Strings; + +public class AsyncListConnectClustersPaged { + + public static void main(String[] args) throws Exception { + asyncListConnectClustersPaged(); + } + + public static void asyncListConnectClustersPaged() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ListConnectClustersRequest request = + ListConnectClustersRequest.newBuilder() + .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .setFilter("filter-1274492040") + .setOrderBy("orderBy-1207110587") + .build(); + while (true) { + ListConnectClustersResponse response = + managedKafkaConnectClient.listConnectClustersCallable().call(request); + for (ConnectCluster element : response.getConnectClustersList()) { + // doThingsWith(element); + } + String nextPageToken = response.getNextPageToken(); + if (!Strings.isNullOrEmpty(nextPageToken)) { + request = request.toBuilder().setPageToken(nextPageToken).build(); + } else { + break; + } + } + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_Paged_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/SyncListConnectClusters.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/SyncListConnectClusters.java new file mode 100644 index 000000000000..81b0b5399a98 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/SyncListConnectClusters.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ListConnectClustersRequest; +import com.google.cloud.managedkafka.v1.LocationName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncListConnectClusters { + + public static void main(String[] args) throws Exception { + syncListConnectClusters(); + } + + public static void syncListConnectClusters() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ListConnectClustersRequest request = + ListConnectClustersRequest.newBuilder() + .setParent(LocationName.of("[PROJECT]", "[LOCATION]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .setFilter("filter-1274492040") + .setOrderBy("orderBy-1207110587") + .build(); + for (ConnectCluster element : + managedKafkaConnectClient.listConnectClusters(request).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/SyncListConnectClustersLocationname.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/SyncListConnectClustersLocationname.java new file mode 100644 index 000000000000..b24427b83a3b --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/SyncListConnectClustersLocationname.java @@ -0,0 +1,45 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_Locationname_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.LocationName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncListConnectClustersLocationname { + + public static void main(String[] args) throws Exception { + syncListConnectClustersLocationname(); + } + + public static void syncListConnectClustersLocationname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. 
+ // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + LocationName parent = LocationName.of("[PROJECT]", "[LOCATION]"); + for (ConnectCluster element : + managedKafkaConnectClient.listConnectClusters(parent).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_Locationname_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/SyncListConnectClustersString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/SyncListConnectClustersString.java new file mode 100644 index 000000000000..f80bad6bde97 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectclusters/SyncListConnectClustersString.java @@ -0,0 +1,45 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_String_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.LocationName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncListConnectClustersString { + + public static void main(String[] args) throws Exception { + syncListConnectClustersString(); + } + + public static void syncListConnectClustersString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+      // - It may require specifying regional endpoints when creating the service client as shown in +      // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library +    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { +      String parent = LocationName.of("[PROJECT]", "[LOCATION]").toString(); +      for (ConnectCluster element : +          managedKafkaConnectClient.listConnectClusters(parent).iterateAll()) { +        // doThingsWith(element); +      } +    } +  } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectClusters_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/AsyncListConnectors.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/AsyncListConnectors.java new file mode 100644 index 000000000000..ad027adb1c98 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/AsyncListConnectors.java @@ -0,0 +1,55 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ListConnectorsRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class AsyncListConnectors { + +  public static void main(String[] args) throws Exception { +    asyncListConnectors(); +  } + +  public static void asyncListConnectors() throws Exception { +    // This snippet has been automatically generated and should be regarded as a code template only. +    // It will require modifications to work: +    // - It may require correct/in-range values for request initialization. +    // - It may require specifying regional endpoints when creating the service client as shown in +    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library +    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { +      ListConnectorsRequest request = +          ListConnectorsRequest.newBuilder() +              .setParent( +                  ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) +              .setPageSize(883849137) +              .setPageToken("pageToken873572522") +              .build(); +      ApiFuture<Connector> future = +          managedKafkaConnectClient.listConnectorsPagedCallable().futureCall(request); +      // Do something.
+ for (Connector element : future.get().iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/AsyncListConnectorsPaged.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/AsyncListConnectorsPaged.java new file mode 100644 index 000000000000..c99a099a9077 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/AsyncListConnectorsPaged.java @@ -0,0 +1,63 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_Paged_async] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ListConnectorsRequest; +import com.google.cloud.managedkafka.v1.ListConnectorsResponse; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.common.base.Strings; + +public class AsyncListConnectorsPaged { + + public static void main(String[] args) throws Exception { + asyncListConnectorsPaged(); + } + + public static void asyncListConnectorsPaged() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ListConnectorsRequest request = + ListConnectorsRequest.newBuilder() + .setParent( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + while (true) { + ListConnectorsResponse response = + managedKafkaConnectClient.listConnectorsCallable().call(request); + for (Connector element : response.getConnectorsList()) { + // doThingsWith(element); + } + String nextPageToken = response.getNextPageToken(); + if (!Strings.isNullOrEmpty(nextPageToken)) { + request = request.toBuilder().setPageToken(nextPageToken).build(); + } else { + break; + } + } + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_Paged_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/SyncListConnectors.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/SyncListConnectors.java new file mode 100644 index 000000000000..2ccdf4bf8af3 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/SyncListConnectors.java @@ -0,0 +1,51 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_sync] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ListConnectorsRequest; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncListConnectors { + + public static void main(String[] args) throws Exception { + syncListConnectors(); + } + + public static void syncListConnectors() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ListConnectorsRequest request = + ListConnectorsRequest.newBuilder() + .setParent( + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString()) + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + for (Connector element : managedKafkaConnectClient.listConnectors(request).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/SyncListConnectorsConnectclustername.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/SyncListConnectorsConnectclustername.java new file mode 100644 index 000000000000..7b7b6c6d8096 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/SyncListConnectorsConnectclustername.java @@ -0,0 +1,45 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_Connectclustername_sync] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncListConnectorsConnectclustername { + + public static void main(String[] args) throws Exception { + syncListConnectorsConnectclustername(); + } + + public static void syncListConnectorsConnectclustername() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectClusterName parent = + ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]"); + for (Connector element : managedKafkaConnectClient.listConnectors(parent).iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_Connectclustername_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/SyncListConnectorsString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/SyncListConnectorsString.java new file mode 100644 index 000000000000..874827cbf515 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listconnectors/SyncListConnectorsString.java @@ -0,0 +1,45 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_String_sync] +import com.google.cloud.managedkafka.v1.ConnectClusterName; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncListConnectorsString { + + public static void main(String[] args) throws Exception { + syncListConnectorsString(); + } + + public static void syncListConnectorsString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+      // - It may require specifying regional endpoints when creating the service client as shown in +      // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library +    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { +      String parent = +          ConnectClusterName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]").toString(); +      for (Connector element : managedKafkaConnectClient.listConnectors(parent).iterateAll()) { +        // doThingsWith(element); +      } +    } +  } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListConnectors_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listlocations/AsyncListLocations.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listlocations/AsyncListLocations.java new file mode 100644 index 000000000000..085c41f2197b --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listlocations/AsyncListLocations.java @@ -0,0 +1,54 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListLocations_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class AsyncListLocations { + +  public static void main(String[] args) throws Exception { +    asyncListLocations(); +  } + +  public static void asyncListLocations() throws Exception { +    // This snippet has been automatically generated and should be regarded as a code template only. +    // It will require modifications to work: +    // - It may require correct/in-range values for request initialization. +    // - It may require specifying regional endpoints when creating the service client as shown in +    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library +    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { +      ListLocationsRequest request = +          ListLocationsRequest.newBuilder() +              .setName("name3373707") +              .setFilter("filter-1274492040") +              .setPageSize(883849137) +              .setPageToken("pageToken873572522") +              .build(); +      ApiFuture<Location> future = +          managedKafkaConnectClient.listLocationsPagedCallable().futureCall(request); +      // Do something.
+ for (Location element : future.get().iterateAll()) { + // doThingsWith(element); + } + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListLocations_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listlocations/AsyncListLocationsPaged.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listlocations/AsyncListLocationsPaged.java new file mode 100644 index 000000000000..ff05a2a3859c --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listlocations/AsyncListLocationsPaged.java @@ -0,0 +1,62 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListLocations_Paged_async] +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.ListLocationsResponse; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.common.base.Strings; + +public class AsyncListLocationsPaged { + + public static void main(String[] args) throws Exception { + asyncListLocationsPaged(); + } + + public static void asyncListLocationsPaged() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ListLocationsRequest request = + ListLocationsRequest.newBuilder() + .setName("name3373707") + .setFilter("filter-1274492040") + .setPageSize(883849137) + .setPageToken("pageToken873572522") + .build(); + while (true) { + ListLocationsResponse response = + managedKafkaConnectClient.listLocationsCallable().call(request); + for (Location element : response.getLocationsList()) { + // doThingsWith(element); + } + String nextPageToken = response.getNextPageToken(); + if (!Strings.isNullOrEmpty(nextPageToken)) { + request = request.toBuilder().setPageToken(nextPageToken).build(); + } else { + break; + } + } + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListLocations_Paged_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listlocations/SyncListLocations.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listlocations/SyncListLocations.java new file mode 100644 index 000000000000..f04813c51b91 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/listlocations/SyncListLocations.java @@ -0,0 +1,50 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ListLocations_sync] +import com.google.cloud.location.ListLocationsRequest; +import com.google.cloud.location.Location; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; + +public class SyncListLocations { + + public static void main(String[] args) throws Exception { + syncListLocations(); + } + + public static void syncListLocations() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+      // - It may require specifying regional endpoints when creating the service client as shown in +      // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library +    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { +      ListLocationsRequest request = +          ListLocationsRequest.newBuilder() +              .setName("name3373707") +              .setFilter("filter-1274492040") +              .setPageSize(883849137) +              .setPageToken("pageToken873572522") +              .build(); +      for (Location element : managedKafkaConnectClient.listLocations(request).iterateAll()) { +        // doThingsWith(element); +      } +    } +  } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ListLocations_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/AsyncPauseConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/AsyncPauseConnector.java new file mode 100644 index 000000000000..05293de59fbd --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/AsyncPauseConnector.java @@ -0,0 +1,52 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_PauseConnector_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.PauseConnectorRequest; +import com.google.cloud.managedkafka.v1.PauseConnectorResponse; + +public class AsyncPauseConnector { + +  public static void main(String[] args) throws Exception { +    asyncPauseConnector(); +  } + +  public static void asyncPauseConnector() throws Exception { +    // This snippet has been automatically generated and should be regarded as a code template only. +    // It will require modifications to work: +    // - It may require correct/in-range values for request initialization. +    // - It may require specifying regional endpoints when creating the service client as shown in +    // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library +    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { +      PauseConnectorRequest request = +          PauseConnectorRequest.newBuilder() +              .setName( +                  ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") +                      .toString()) +              .build(); +      ApiFuture<PauseConnectorResponse> future = +          managedKafkaConnectClient.pauseConnectorCallable().futureCall(request); +      // Do something.
+ PauseConnectorResponse response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_PauseConnector_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/SyncPauseConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/SyncPauseConnector.java new file mode 100644 index 000000000000..505e7d7123a0 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/SyncPauseConnector.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_PauseConnector_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.PauseConnectorRequest; +import com.google.cloud.managedkafka.v1.PauseConnectorResponse; + +public class SyncPauseConnector { + + public static void main(String[] args) throws Exception { + syncPauseConnector(); + } + + public static void syncPauseConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + PauseConnectorRequest request = + PauseConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + PauseConnectorResponse response = managedKafkaConnectClient.pauseConnector(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_PauseConnector_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/SyncPauseConnectorConnectorname.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/SyncPauseConnectorConnectorname.java new file mode 100644 index 000000000000..3ecd03e85b89 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/SyncPauseConnectorConnectorname.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_PauseConnector_Connectorname_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.PauseConnectorResponse; + +public class SyncPauseConnectorConnectorname { + + public static void main(String[] args) throws Exception { + syncPauseConnectorConnectorname(); + } + + public static void syncPauseConnectorConnectorname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + PauseConnectorResponse response = managedKafkaConnectClient.pauseConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_PauseConnector_Connectorname_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/SyncPauseConnectorString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/SyncPauseConnectorString.java new file mode 100644 index 000000000000..861ec9768048 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/pauseconnector/SyncPauseConnectorString.java @@ -0,0 +1,44 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_PauseConnector_String_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.PauseConnectorResponse; + +public class SyncPauseConnectorString { + + public static void main(String[] args) throws Exception { + syncPauseConnectorString(); + } + + public static void syncPauseConnectorString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString(); + PauseConnectorResponse response = managedKafkaConnectClient.pauseConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_PauseConnector_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/AsyncRestartConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/AsyncRestartConnector.java new file mode 100644 index 000000000000..3d237b6bd6b6 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/AsyncRestartConnector.java @@ -0,0 +1,52 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_RestartConnector_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.RestartConnectorRequest; +import com.google.cloud.managedkafka.v1.RestartConnectorResponse; + +public class AsyncRestartConnector { + + public static void main(String[] args) throws Exception { + asyncRestartConnector(); + } + + public static void asyncRestartConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+      // - It may require specifying regional endpoints when creating the service client as shown in +      // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library +    try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { +      RestartConnectorRequest request = +          RestartConnectorRequest.newBuilder() +              .setName( +                  ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") +                      .toString()) +              .build(); +      ApiFuture<RestartConnectorResponse> future = +          managedKafkaConnectClient.restartConnectorCallable().futureCall(request); +      // Do something. +      RestartConnectorResponse response = future.get(); +    } +  } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_RestartConnector_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/SyncRestartConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/SyncRestartConnector.java new file mode 100644 index 000000000000..8dcf8e6120fc --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/SyncRestartConnector.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_RestartConnector_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.RestartConnectorRequest; +import com.google.cloud.managedkafka.v1.RestartConnectorResponse; + +public class SyncRestartConnector { + +  public static void main(String[] args) throws Exception { +    syncRestartConnector(); +  } + +  public static void syncRestartConnector() throws Exception { +    // This snippet has been automatically generated and should be regarded as a code template only. +    // It will require modifications to work: +    // - It may require correct/in-range values for request initialization.
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + RestartConnectorRequest request = + RestartConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + RestartConnectorResponse response = managedKafkaConnectClient.restartConnector(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_RestartConnector_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/SyncRestartConnectorConnectorname.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/SyncRestartConnectorConnectorname.java new file mode 100644 index 000000000000..29553fdd3418 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/SyncRestartConnectorConnectorname.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_RestartConnector_Connectorname_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.RestartConnectorResponse; + +public class SyncRestartConnectorConnectorname { + + public static void main(String[] args) throws Exception { + syncRestartConnectorConnectorname(); + } + + public static void syncRestartConnectorConnectorname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + RestartConnectorResponse response = managedKafkaConnectClient.restartConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_RestartConnector_Connectorname_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/SyncRestartConnectorString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/SyncRestartConnectorString.java new file mode 100644 index 000000000000..aa646c448fc7 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/restartconnector/SyncRestartConnectorString.java @@ -0,0 +1,44 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_RestartConnector_String_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.RestartConnectorResponse; + +public class SyncRestartConnectorString { + + public static void main(String[] args) throws Exception { + syncRestartConnectorString(); + } + + public static void syncRestartConnectorString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString(); + RestartConnectorResponse response = managedKafkaConnectClient.restartConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_RestartConnector_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/AsyncResumeConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/AsyncResumeConnector.java new file mode 100644 index 000000000000..5c6220db91cb --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/AsyncResumeConnector.java @@ -0,0 +1,52 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ResumeConnector_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.ResumeConnectorRequest; +import com.google.cloud.managedkafka.v1.ResumeConnectorResponse; + +public class AsyncResumeConnector { + + public static void main(String[] args) throws Exception { + asyncResumeConnector(); + } + + public static void asyncResumeConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ResumeConnectorRequest request = + ResumeConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + ApiFuture<ResumeConnectorResponse> future = + managedKafkaConnectClient.resumeConnectorCallable().futureCall(request); + // Do something.
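+ // futureCall(request) returns immediately; the get() below blocks until the ResumeConnector RPC completes.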
+ ResumeConnectorResponse response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ResumeConnector_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/SyncResumeConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/SyncResumeConnector.java new file mode 100644 index 000000000000..be1fe37fe2fb --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/SyncResumeConnector.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ResumeConnector_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.ResumeConnectorRequest; +import com.google.cloud.managedkafka.v1.ResumeConnectorResponse; + +public class SyncResumeConnector { + + public static void main(String[] args) throws Exception { + syncResumeConnector(); + } + + public static void syncResumeConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ResumeConnectorRequest request = + ResumeConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + ResumeConnectorResponse response = managedKafkaConnectClient.resumeConnector(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ResumeConnector_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/SyncResumeConnectorConnectorname.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/SyncResumeConnectorConnectorname.java new file mode 100644 index 000000000000..0dcc783d9a5a --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/SyncResumeConnectorConnectorname.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ResumeConnector_Connectorname_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.ResumeConnectorResponse; + +public class SyncResumeConnectorConnectorname { + + public static void main(String[] args) throws Exception { + syncResumeConnectorConnectorname(); + } + + public static void syncResumeConnectorConnectorname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + ResumeConnectorResponse response = managedKafkaConnectClient.resumeConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ResumeConnector_Connectorname_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/SyncResumeConnectorString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/SyncResumeConnectorString.java new file mode 100644 index 000000000000..49e753aa1446 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/resumeconnector/SyncResumeConnectorString.java @@ -0,0 +1,44 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_ResumeConnector_String_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.ResumeConnectorResponse; + +public class SyncResumeConnectorString { + + public static void main(String[] args) throws Exception { + syncResumeConnectorString(); + } + + public static void syncResumeConnectorString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString(); + ResumeConnectorResponse response = managedKafkaConnectClient.resumeConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_ResumeConnector_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/AsyncStopConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/AsyncStopConnector.java new file mode 100644 index 000000000000..67c6d578c79d --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/AsyncStopConnector.java @@ -0,0 +1,52 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_StopConnector_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.StopConnectorRequest; +import com.google.cloud.managedkafka.v1.StopConnectorResponse; + +public class AsyncStopConnector { + + public static void main(String[] args) throws Exception { + asyncStopConnector(); + } + + public static void asyncStopConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + StopConnectorRequest request = + StopConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + ApiFuture<StopConnectorResponse> future = + managedKafkaConnectClient.stopConnectorCallable().futureCall(request); + // Do something. + StopConnectorResponse response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_StopConnector_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/SyncStopConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/SyncStopConnector.java new file mode 100644 index 000000000000..062fd77a14e5 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/SyncStopConnector.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_StopConnector_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.StopConnectorRequest; +import com.google.cloud.managedkafka.v1.StopConnectorResponse; + +public class SyncStopConnector { + + public static void main(String[] args) throws Exception { + syncStopConnector(); + } + + public static void syncStopConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization.
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + StopConnectorRequest request = + StopConnectorRequest.newBuilder() + .setName( + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString()) + .build(); + StopConnectorResponse response = managedKafkaConnectClient.stopConnector(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_StopConnector_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/SyncStopConnectorConnectorname.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/SyncStopConnectorConnectorname.java new file mode 100644 index 000000000000..6b7cb6f88c53 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/SyncStopConnectorConnectorname.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_StopConnector_Connectorname_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.StopConnectorResponse; + +public class SyncStopConnectorConnectorname { + + public static void main(String[] args) throws Exception { + syncStopConnectorConnectorname(); + } + + public static void syncStopConnectorConnectorname() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectorName name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]"); + StopConnectorResponse response = managedKafkaConnectClient.stopConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_StopConnector_Connectorname_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/SyncStopConnectorString.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/SyncStopConnectorString.java new file mode 100644 index 000000000000..e4eeee3007f8 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/stopconnector/SyncStopConnectorString.java @@ -0,0 +1,44 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_StopConnector_String_sync] +import com.google.cloud.managedkafka.v1.ConnectorName; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.StopConnectorResponse; + +public class SyncStopConnectorString { + + public static void main(String[] args) throws Exception { + syncStopConnectorString(); + } + + public static void syncStopConnectorString() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + String name = + ConnectorName.of("[PROJECT]", "[LOCATION]", "[CONNECT_CLUSTER]", "[CONNECTOR]") + .toString(); + StopConnectorResponse response = managedKafkaConnectClient.stopConnector(name); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_StopConnector_String_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/AsyncUpdateConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/AsyncUpdateConnectCluster.java new file mode 100644 index 000000000000..21f20f2b58c4 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/AsyncUpdateConnectCluster.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnectCluster_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest; +import com.google.longrunning.Operation; +import com.google.protobuf.FieldMask; + +public class AsyncUpdateConnectCluster { + + public static void main(String[] args) throws Exception { + asyncUpdateConnectCluster(); + } + + public static void asyncUpdateConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + UpdateConnectClusterRequest request = + UpdateConnectClusterRequest.newBuilder() + .setUpdateMask(FieldMask.newBuilder().build()) + .setConnectCluster(ConnectCluster.newBuilder().build()) + .setRequestId("requestId693933066") + .build(); + ApiFuture<Operation> future = + managedKafkaConnectClient.updateConnectClusterCallable().futureCall(request); + // Do something.
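+ // Here get() waits only for the initial RPC; the returned Operation is the raw
+ // long-running-operation handle and may not be finished yet.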
+ Operation response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnectCluster_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/AsyncUpdateConnectClusterLRO.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/AsyncUpdateConnectClusterLRO.java new file mode 100644 index 000000000000..d80854d5c85f --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/AsyncUpdateConnectClusterLRO.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnectCluster_LRO_async] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.OperationMetadata; +import com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest; +import com.google.protobuf.FieldMask; + +public class AsyncUpdateConnectClusterLRO { + + public static void main(String[] args) throws Exception { + asyncUpdateConnectClusterLRO(); + } + + public static void asyncUpdateConnectClusterLRO() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + UpdateConnectClusterRequest request = + UpdateConnectClusterRequest.newBuilder() + .setUpdateMask(FieldMask.newBuilder().build()) + .setConnectCluster(ConnectCluster.newBuilder().build()) + .setRequestId("requestId693933066") + .build(); + OperationFuture<ConnectCluster, OperationMetadata> future = + managedKafkaConnectClient.updateConnectClusterOperationCallable().futureCall(request); + // Do something.
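+ // Unlike the raw callable, the OperationFuture tracks the long-running operation;
+ // get() blocks until it completes and yields the final ConnectCluster.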
+ ConnectCluster response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnectCluster_LRO_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/SyncUpdateConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/SyncUpdateConnectCluster.java new file mode 100644 index 000000000000..e63d80a0c7d7 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/SyncUpdateConnectCluster.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnectCluster_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.UpdateConnectClusterRequest; +import com.google.protobuf.FieldMask; + +public class SyncUpdateConnectCluster { + + public static void main(String[] args) throws Exception { + syncUpdateConnectCluster(); + } + + public static void syncUpdateConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + UpdateConnectClusterRequest request = + UpdateConnectClusterRequest.newBuilder() + .setUpdateMask(FieldMask.newBuilder().build()) + .setConnectCluster(ConnectCluster.newBuilder().build()) + .setRequestId("requestId693933066") + .build(); + ConnectCluster response = managedKafkaConnectClient.updateConnectClusterAsync(request).get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnectCluster_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/SyncUpdateConnectClusterConnectclusterFieldmask.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/SyncUpdateConnectClusterConnectclusterFieldmask.java new file mode 100644 index 000000000000..85ad6e6ab073 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnectcluster/SyncUpdateConnectClusterConnectclusterFieldmask.java @@ -0,0 +1,44 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnectCluster_ConnectclusterFieldmask_sync] +import com.google.cloud.managedkafka.v1.ConnectCluster; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.protobuf.FieldMask; + +public class SyncUpdateConnectClusterConnectclusterFieldmask { + + public static void main(String[] args) throws Exception { + syncUpdateConnectClusterConnectclusterFieldmask(); + } + + public static void syncUpdateConnectClusterConnectclusterFieldmask() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + ConnectCluster connectCluster = ConnectCluster.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + ConnectCluster response = + managedKafkaConnectClient.updateConnectClusterAsync(connectCluster, updateMask).get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnectCluster_ConnectclusterFieldmask_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnector/AsyncUpdateConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnector/AsyncUpdateConnector.java new file mode 100644 index 000000000000..5507945d7957 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnector/AsyncUpdateConnector.java @@ -0,0 +1,51 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnector_async] +import com.google.api.core.ApiFuture; +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.UpdateConnectorRequest; +import com.google.protobuf.FieldMask; + +public class AsyncUpdateConnector { + + public static void main(String[] args) throws Exception { + asyncUpdateConnector(); + } + + public static void asyncUpdateConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + UpdateConnectorRequest request = + UpdateConnectorRequest.newBuilder() + .setUpdateMask(FieldMask.newBuilder().build()) + .setConnector(Connector.newBuilder().build()) + .build(); + ApiFuture<Connector> future = + managedKafkaConnectClient.updateConnectorCallable().futureCall(request); + // Do something.
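+ // UpdateConnector is a unary call, so get() blocks until the updated Connector is returned.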
+ Connector response = future.get(); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnector_async] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnector/SyncUpdateConnector.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnector/SyncUpdateConnector.java new file mode 100644 index 000000000000..1e3fa3893ff4 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnector/SyncUpdateConnector.java @@ -0,0 +1,47 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnector_sync] +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.cloud.managedkafka.v1.UpdateConnectorRequest; +import com.google.protobuf.FieldMask; + +public class SyncUpdateConnector { + + public static void main(String[] args) throws Exception { + syncUpdateConnector(); + } + + public static void syncUpdateConnector() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + UpdateConnectorRequest request = + UpdateConnectorRequest.newBuilder() + .setUpdateMask(FieldMask.newBuilder().build()) + .setConnector(Connector.newBuilder().build()) + .build(); + Connector response = managedKafkaConnectClient.updateConnector(request); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnector_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnector/SyncUpdateConnectorConnectorFieldmask.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnector/SyncUpdateConnectorConnectorFieldmask.java new file mode 100644 index 000000000000..a83c00113946 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnect/updateconnector/SyncUpdateConnectorConnectorFieldmask.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnector_ConnectorFieldmask_sync] +import com.google.cloud.managedkafka.v1.Connector; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectClient; +import com.google.protobuf.FieldMask; + +public class SyncUpdateConnectorConnectorFieldmask { + + public static void main(String[] args) throws Exception { + syncUpdateConnectorConnectorFieldmask(); + } + + public static void syncUpdateConnectorConnectorFieldmask() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + try (ManagedKafkaConnectClient managedKafkaConnectClient = ManagedKafkaConnectClient.create()) { + Connector connector = Connector.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + Connector response = managedKafkaConnectClient.updateConnector(connector, updateMask); + } + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnect_UpdateConnector_ConnectorFieldmask_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnectsettings/createconnectcluster/SyncCreateConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnectsettings/createconnectcluster/SyncCreateConnectCluster.java new file mode 100644 index 000000000000..778e4ac012d0 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnectsettings/createconnectcluster/SyncCreateConnectCluster.java @@ -0,0 +1,54 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnectSettings_CreateConnectCluster_sync] +import com.google.api.gax.longrunning.OperationalTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.TimedRetryAlgorithm; +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectSettings; +import java.time.Duration; + +public class SyncCreateConnectCluster { + + public static void main(String[] args) throws Exception { + syncCreateConnectCluster(); + } + + public static void syncCreateConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. + // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + ManagedKafkaConnectSettings.Builder managedKafkaConnectSettingsBuilder = + ManagedKafkaConnectSettings.newBuilder(); + TimedRetryAlgorithm timedRetryAlgorithm = + OperationalTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(5000)) + .setTotalTimeoutDuration(Duration.ofHours(24)) + .build()); + managedKafkaConnectSettingsBuilder + .createConnectClusterOperationSettings() + .setPollingAlgorithm(timedRetryAlgorithm) + .build(); + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnectSettings_CreateConnectCluster_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnectsettings/getconnectcluster/SyncGetConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnectsettings/getconnectcluster/SyncGetConnectCluster.java new file mode 100644 index 000000000000..d949fd5bc7a0 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/managedkafkaconnectsettings/getconnectcluster/SyncGetConnectCluster.java @@ -0,0 +1,57 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnectSettings_GetConnectCluster_sync] +import com.google.cloud.managedkafka.v1.ManagedKafkaConnectSettings; +import java.time.Duration; + +public class SyncGetConnectCluster { + + public static void main(String[] args) throws Exception { + syncGetConnectCluster(); + } + + public static void syncGetConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization.
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + ManagedKafkaConnectSettings.Builder managedKafkaConnectSettingsBuilder = + ManagedKafkaConnectSettings.newBuilder(); + managedKafkaConnectSettingsBuilder + .getConnectClusterSettings() + .setRetrySettings( + managedKafkaConnectSettingsBuilder + .getConnectClusterSettings() + .getRetrySettings() + .toBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(1)) + .setInitialRpcTimeoutDuration(Duration.ofSeconds(5)) + .setMaxAttempts(5) + .setMaxRetryDelayDuration(Duration.ofSeconds(30)) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(60)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.5) + .setTotalTimeoutDuration(Duration.ofSeconds(300)) + .build()); + ManagedKafkaConnectSettings managedKafkaConnectSettings = + managedKafkaConnectSettingsBuilder.build(); + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnectSettings_GetConnectCluster_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/stub/managedkafkaconnectstubsettings/createconnectcluster/SyncCreateConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/stub/managedkafkaconnectstubsettings/createconnectcluster/SyncCreateConnectCluster.java new file mode 100644 index 000000000000..7b32154677ad --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/stub/managedkafkaconnectstubsettings/createconnectcluster/SyncCreateConnectCluster.java @@ -0,0 +1,54 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.stub.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnectStubSettings_CreateConnectCluster_sync] +import com.google.api.gax.longrunning.OperationalTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.TimedRetryAlgorithm; +import com.google.cloud.managedkafka.v1.stub.ManagedKafkaConnectStubSettings; +import java.time.Duration; + +public class SyncCreateConnectCluster { + + public static void main(String[] args) throws Exception { + syncCreateConnectCluster(); + } + + public static void syncCreateConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization. 
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + ManagedKafkaConnectStubSettings.Builder managedKafkaConnectSettingsBuilder = + ManagedKafkaConnectStubSettings.newBuilder(); + TimedRetryAlgorithm timedRetryAlgorithm = + OperationalTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(5000)) + .setTotalTimeoutDuration(Duration.ofHours(24)) + .build()); + managedKafkaConnectSettingsBuilder + .createConnectClusterOperationSettings() + .setPollingAlgorithm(timedRetryAlgorithm) + .build(); + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnectStubSettings_CreateConnectCluster_sync] diff --git a/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/stub/managedkafkaconnectstubsettings/getconnectcluster/SyncGetConnectCluster.java b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/stub/managedkafkaconnectstubsettings/getconnectcluster/SyncGetConnectCluster.java new file mode 100644 index 000000000000..c3c2176c4e15 --- /dev/null +++ b/java-managedkafka/samples/snippets/generated/com/google/cloud/managedkafka/v1/stub/managedkafkaconnectstubsettings/getconnectcluster/SyncGetConnectCluster.java @@ -0,0 +1,57 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.managedkafka.v1.stub.samples; + +// [START managedkafka_v1_generated_ManagedKafkaConnectStubSettings_GetConnectCluster_sync] +import com.google.cloud.managedkafka.v1.stub.ManagedKafkaConnectStubSettings; +import java.time.Duration; + +public class SyncGetConnectCluster { + + public static void main(String[] args) throws Exception { + syncGetConnectCluster(); + } + + public static void syncGetConnectCluster() throws Exception { + // This snippet has been automatically generated and should be regarded as a code template only. + // It will require modifications to work: + // - It may require correct/in-range values for request initialization.
+ // - It may require specifying regional endpoints when creating the service client as shown in + // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library + ManagedKafkaConnectStubSettings.Builder managedKafkaConnectSettingsBuilder = + ManagedKafkaConnectStubSettings.newBuilder(); + managedKafkaConnectSettingsBuilder + .getConnectClusterSettings() + .setRetrySettings( + managedKafkaConnectSettingsBuilder + .getConnectClusterSettings() + .getRetrySettings() + .toBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(1)) + .setInitialRpcTimeoutDuration(Duration.ofSeconds(5)) + .setMaxAttempts(5) + .setMaxRetryDelayDuration(Duration.ofSeconds(30)) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(60)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.5) + .setTotalTimeoutDuration(Duration.ofSeconds(300)) + .build()); + ManagedKafkaConnectStubSettings managedKafkaConnectSettings = + managedKafkaConnectSettingsBuilder.build(); + } +} +// [END managedkafka_v1_generated_ManagedKafkaConnectStubSettings_GetConnectCluster_sync] diff --git a/java-vertexai/google-cloud-vertexai-bom/pom.xml b/java-vertexai/google-cloud-vertexai-bom/pom.xml index d3ebe96e2578..ea1c1e5e25f1 100644 --- a/java-vertexai/google-cloud-vertexai-bom/pom.xml +++ b/java-vertexai/google-cloud-vertexai-bom/pom.xml @@ -3,13 +3,13 @@ 4.0.0 com.google.cloud google-cloud-vertexai-bom - 1.20.0 + 1.20.1 pom com.google.cloud google-cloud-pom-parent - 1.54.0 + 1.54.1 ../../google-cloud-pom-parent/pom.xml @@ -27,17 +27,17 @@ com.google.cloud google-cloud-vertexai - 1.20.0 + 1.20.1 com.google.api.grpc grpc-google-cloud-vertexai-v1 - 1.20.0 + 1.20.1 com.google.api.grpc proto-google-cloud-vertexai-v1 - 1.20.0 + 1.20.1
diff --git a/java-vertexai/google-cloud-vertexai/pom.xml b/java-vertexai/google-cloud-vertexai/pom.xml index 097e738a5001..6f5ee7d6c3a9 100644 --- a/java-vertexai/google-cloud-vertexai/pom.xml +++ b/java-vertexai/google-cloud-vertexai/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-vertexai - 1.20.0 + 1.20.1 jar Google VertexAI API VertexAI API Vertex AI is an integrated suite of machine learning tools and services @@ -12,7 +12,7 @@ com.google.cloud google-cloud-vertexai-parent - 1.20.0 + 1.20.1 google-cloud-vertexai diff --git a/java-vertexai/google-cloud-vertexai/src/main/java/com/google/cloud/vertexai/VertexAI.java b/java-vertexai/google-cloud-vertexai/src/main/java/com/google/cloud/vertexai/VertexAI.java index a6886e439450..8284e5821d88 100644 --- a/java-vertexai/google-cloud-vertexai/src/main/java/com/google/cloud/vertexai/VertexAI.java +++ b/java-vertexai/google-cloud-vertexai/src/main/java/com/google/cloud/vertexai/VertexAI.java @@ -360,13 +360,6 @@ private PredictionServiceSettings getPredictionServiceSettings() throws IOExcept builder.setEndpoint(String.format("%s:443", apiEndpoint)); builder.setCredentialsProvider(credentialsProvider); - HeaderProvider headerProvider = - FixedHeaderProvider.create( - "user-agent", - String.format( - "%s/%s", - Constants.USER_AGENT_HEADER, - GaxProperties.getLibraryVersion(PredictionServiceSettings.class))); builder.setHeaderProvider(headerProvider); return builder.build(); } @@ -435,13 +428,6 @@ private LlmUtilityServiceSettings getLlmUtilityServiceClientSettings() throws IO settingsBuilder.setEndpoint(String.format("%s:443", apiEndpoint)); settingsBuilder.setCredentialsProvider(credentialsProvider); - HeaderProvider headerProvider = - FixedHeaderProvider.create( - "user-agent", - String.format( - "%s/%s", - Constants.USER_AGENT_HEADER, - GaxProperties.getLibraryVersion(LlmUtilityServiceSettings.class))); settingsBuilder.setHeaderProvider(headerProvider); return settingsBuilder.build(); } diff --git a/java-vertexai/google-cloud-vertexai/src/test/java/com/google/cloud/vertexai/VertexAITest.java b/java-vertexai/google-cloud-vertexai/src/test/java/com/google/cloud/vertexai/VertexAITest.java index 7baddcf7b088..58a20773dc30 100644 --- a/java-vertexai/google-cloud-vertexai/src/test/java/com/google/cloud/vertexai/VertexAITest.java +++ b/java-vertexai/google-cloud-vertexai/src/test/java/com/google/cloud/vertexai/VertexAITest.java @@ -25,6 +25,8 @@ import com.google.api.gax.core.GaxProperties; import com.google.api.gax.core.GoogleCredentialsProvider; import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.vertexai.api.LlmUtilityServiceClient; +import com.google.cloud.vertexai.api.LlmUtilityServiceSettings; import com.google.cloud.vertexai.api.PredictionServiceClient; import com.google.cloud.vertexai.api.PredictionServiceSettings; import com.google.common.collect.ImmutableList; @@ -58,6 +60,8 @@ public final class VertexAITest { @Mock private PredictionServiceClient mockPredictionServiceClient; + @Mock private LlmUtilityServiceClient mockLlmUtilityServiceClient; + @Mock private GoogleCredentialsProvider.Builder mockCredentialsProviderBuilder; @Mock private GoogleCredentialsProvider mockCredentialsProvider; @@ -425,6 +429,20 @@ public void testInstantiateVertexAI_builderWithCustomHeaders_shouldContainRightF Constants.USER_AGENT_HEADER, GaxProperties.getLibraryVersion(PredictionServiceSettings.class))); assertThat(vertexAi.getHeaders()).isEqualTo(expectedHeaders); + + // make sure the custom headers are set correctly in the 
prediction service client + try (MockedStatic<PredictionServiceClient> mockStatic = mockStatic(PredictionServiceClient.class)) { + mockStatic + .when(() -> PredictionServiceClient.create(any(PredictionServiceSettings.class))) + .thenReturn(mockPredictionServiceClient); + PredictionServiceClient unused = vertexAi.getPredictionServiceClient(); + + ArgumentCaptor<PredictionServiceSettings> settings = + ArgumentCaptor.forClass(PredictionServiceSettings.class); + mockStatic.verify(() -> PredictionServiceClient.create(settings.capture())); + + assertThat(settings.getValue().getHeaderProvider().getHeaders()).isEqualTo(expectedHeaders); + } } @Test @@ -454,5 +472,19 @@ public void testInstantiateVertexAI_builderWithCustomHeaders_shouldContainRightF GaxProperties.getLibraryVersion(PredictionServiceSettings.class), "test_value")); assertThat(vertexAi.getHeaders()).isEqualTo(expectedHeaders); + + // make sure the custom headers are set correctly in the llm utility service client + try (MockedStatic<LlmUtilityServiceClient> mockStatic = mockStatic(LlmUtilityServiceClient.class)) { + mockStatic + .when(() -> LlmUtilityServiceClient.create(any(LlmUtilityServiceSettings.class))) + .thenReturn(mockLlmUtilityServiceClient); + LlmUtilityServiceClient unused = vertexAi.getLlmUtilityClient(); + + ArgumentCaptor<LlmUtilityServiceSettings> settings = + ArgumentCaptor.forClass(LlmUtilityServiceSettings.class); + mockStatic.verify(() -> LlmUtilityServiceClient.create(settings.capture())); + + assertThat(settings.getValue().getHeaderProvider().getHeaders()).isEqualTo(expectedHeaders); + } } } diff --git a/java-vertexai/grpc-google-cloud-vertexai-v1/pom.xml b/java-vertexai/grpc-google-cloud-vertexai-v1/pom.xml index 4ed184a71211..b8f83ad6fc07 100644 --- a/java-vertexai/grpc-google-cloud-vertexai-v1/pom.xml +++ b/java-vertexai/grpc-google-cloud-vertexai-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-vertexai-v1 - 1.20.0 + 1.20.1 grpc-google-cloud-vertexai-v1 GRPC library for google-cloud-vertexai com.google.cloud google-cloud-vertexai-parent - 1.20.0 + 1.20.1 diff --git a/java-vertexai/pom.xml b/java-vertexai/pom.xml index 5a52d90f9ec7..b6d57103871a 100644 --- a/java-vertexai/pom.xml +++ b/java-vertexai/pom.xml @@ -4,7 +4,7 @@ com.google.cloud google-cloud-vertexai-parent pom - 1.20.0 + 1.20.1 Google VertexAI API Parent Java idiomatic client for Google Cloud Platform services.
@@ -13,7 +13,7 @@ com.google.cloud google-cloud-jar-parent - 1.54.0 + 1.54.1 ../google-cloud-jar-parent/pom.xml @@ -29,17 +29,17 @@ com.google.cloud google-cloud-vertexai - 1.20.0 + 1.20.1 com.google.api.grpc grpc-google-cloud-vertexai-v1 - 1.20.0 + 1.20.1 com.google.api.grpc proto-google-cloud-vertexai-v1 - 1.20.0 + 1.20.1 diff --git a/java-vertexai/proto-google-cloud-vertexai-v1/pom.xml b/java-vertexai/proto-google-cloud-vertexai-v1/pom.xml index 5ddf4d1f376d..a7e2ecf43943 100644 --- a/java-vertexai/proto-google-cloud-vertexai-v1/pom.xml +++ b/java-vertexai/proto-google-cloud-vertexai-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-vertexai-v1 - 1.20.0 + 1.20.1 proto-google-cloud-vertexai-v1 Proto library for google-cloud-vertexai com.google.cloud google-cloud-vertexai-parent - 1.20.0 + 1.20.1 diff --git a/versions.txt b/versions.txt index 007199cbaf5f..c852bc00f2fe 100644 --- a/versions.txt +++ b/versions.txt @@ -1,7 +1,7 @@ # Format: # module:released-version:current-version -google-cloud-java:1.54.0:1.54.0 +google-cloud-java:1.54.2:1.54.2 google-cloud-accessapproval:2.61.0:2.61.0 grpc-google-cloud-accessapproval-v1:2.61.0:2.61.0 proto-google-cloud-accessapproval-v1:2.61.0:2.61.0 @@ -690,11 +690,11 @@ grpc-google-cloud-telcoautomation-v1alpha1:0.30.0:0.30.0 google-cloud-securesourcemanager:0.30.0:0.30.0 proto-google-cloud-securesourcemanager-v1:0.30.0:0.30.0 grpc-google-cloud-securesourcemanager-v1:0.30.0:0.30.0 -google-cloud-vertexai:1.20.0:1.20.0 -proto-google-cloud-vertexai-v1:1.20.0:1.20.0 -proto-google-cloud-vertexai-v1beta1:1.20.0:1.20.0 -grpc-google-cloud-vertexai-v1:1.20.0:1.20.0 -grpc-google-cloud-vertexai-v1beta1:1.20.0:1.20.0 +google-cloud-vertexai:1.20.1:1.20.1 +proto-google-cloud-vertexai-v1:1.20.1:1.20.1 +proto-google-cloud-vertexai-v1beta1:1.20.1:1.20.1 +grpc-google-cloud-vertexai-v1:1.20.1:1.20.1 +grpc-google-cloud-vertexai-v1beta1:1.20.1:1.20.1 google-cloud-edgenetwork:0.28.0:0.28.0 proto-google-cloud-edgenetwork-v1:0.28.0:0.28.0 grpc-google-cloud-edgenetwork-v1:0.28.0:0.28.0 @@ -776,9 +776,9 @@ grpc-google-cloud-developerconnect-v1:0.17.0:0.17.0 google-cloud-iap:0.16.0:0.16.0 proto-google-cloud-iap-v1:0.16.0:0.16.0 grpc-google-cloud-iap-v1:0.16.0:0.16.0 -google-cloud-managedkafka:0.16.0:0.16.0 -proto-google-cloud-managedkafka-v1:0.16.0:0.16.0 -grpc-google-cloud-managedkafka-v1:0.16.0:0.16.0 +google-cloud-managedkafka:0.16.1:0.16.1 +proto-google-cloud-managedkafka-v1:0.16.1:0.16.1 +grpc-google-cloud-managedkafka-v1:0.16.1:0.16.1 google-cloud-networkservices:0.16.0:0.16.0 proto-google-cloud-networkservices-v1:0.16.0:0.16.0 grpc-google-cloud-networkservices-v1:0.16.0:0.16.0
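For readers tracing the java-vertexai changes above: the removed local `HeaderProvider` in `VertexAI.java` and the new `MockedStatic` assertions in `VertexAITest.java` together verify that headers attached to a `VertexAI` instance now reach the settings of the `PredictionServiceClient` and `LlmUtilityServiceClient` it creates. A minimal usage sketch of that behavior follows; the `setCustomHeaders` builder method and the header map are assumptions inferred from the test names (`builderWithCustomHeaders`, `getHeaders()`), not verified against the 1.20.1 surface.

```java
import com.google.cloud.vertexai.VertexAI;
import com.google.common.collect.ImmutableMap;

public class CustomHeadersSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical builder usage; setCustomHeaders is assumed from the test names above.
    try (VertexAI vertexAi =
        new VertexAI.Builder()
            .setProjectId("my-project") // placeholder project id
            .setLocation("us-central1")
            .setCustomHeaders(ImmutableMap.of("test_key", "test_value"))
            .build()) {
      // Per the 1.20.1 fix, these headers should be merged into the settings of the
      // PredictionServiceClient and LlmUtilityServiceClient created by this instance.
      System.out.println(vertexAi.getHeaders());
    }
  }
}
```

If the sketch matches the real builder surface, the headers printed here are the same map the new tests capture from the generated client settings.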