diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 000000000..b0768eaf4
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,101 @@
+# Top-most EditorConfig file
+root = true
+
+# All files
+[*]
+indent_style = space
+indent_size = 4
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+# C# files
+[*.cs]
+
+# New line preferences
+csharp_new_line_before_open_brace = all
+csharp_new_line_before_else = true
+csharp_new_line_before_catch = true
+csharp_new_line_before_finally = true
+csharp_new_line_before_members_in_object_initializers = true
+csharp_new_line_before_members_in_anonymous_types = true
+csharp_new_line_between_query_expression_clauses = true
+
+# Indentation preferences
+csharp_indent_case_contents = true
+csharp_indent_switch_labels = true
+csharp_indent_labels = flush_left
+
+# Space preferences
+csharp_space_after_cast = false
+csharp_space_after_keywords_in_control_flow_statements = true
+csharp_space_between_method_declaration_parameter_list_parentheses = false
+csharp_space_between_method_call_parameter_list_parentheses = false
+csharp_space_between_parentheses = false
+csharp_space_before_colon_in_inheritance_clause = true
+csharp_space_after_colon_in_inheritance_clause = true
+csharp_space_around_binary_operators = before_and_after
+csharp_space_between_method_declaration_empty_parameter_list_parentheses = false
+csharp_space_between_method_call_name_and_opening_parenthesis = false
+csharp_space_between_method_call_empty_parameter_list_parentheses = false
+
+# Wrapping preferences
+csharp_preserve_single_line_statements = true
+csharp_preserve_single_line_blocks = true
+
+# Expression-bodied members
+csharp_style_expression_bodied_methods = false:none
+csharp_style_expression_bodied_constructors = false:none
+csharp_style_expression_bodied_operators = false:none
+csharp_style_expression_bodied_properties = true:none
+csharp_style_expression_bodied_indexers = true:none
+csharp_style_expression_bodied_accessors = true:none
+
+# Pattern matching preferences
+csharp_style_pattern_matching_over_is_with_cast_check = true:suggestion
+csharp_style_pattern_matching_over_as_with_null_check = true:suggestion
+
+# Null-checking preferences
+csharp_style_throw_expression = true:suggestion
+csharp_style_conditional_delegate_call = true:suggestion
+
+# Modifier preferences
+csharp_preferred_modifier_order = public,private,protected,internal,static,extern,new,virtual,abstract,sealed,override,readonly,unsafe,volatile,async:suggestion
+
+# Expression-level preferences
+csharp_prefer_braces = true:none
+csharp_style_deconstructed_variable_declaration = true:suggestion
+csharp_prefer_simple_default_expression = true:suggestion
+csharp_style_pattern_local_over_anonymous_function = true:suggestion
+csharp_style_inlined_variable_declaration = true:suggestion
+
+# Naming Conventions
+# Interfaces should begin with an I
+dotnet_naming_rule.interface_should_begin_with_i.severity = suggestion
+dotnet_naming_rule.interface_should_begin_with_i.symbols = interface
+dotnet_naming_rule.interface_should_begin_with_i.style = begins_with_i
+
+dotnet_naming_symbols.interface.applicable_kinds = interface
+dotnet_naming_symbols.interface.applicable_accessibilities = public, internal, private, protected, protected_internal
+
+dotnet_naming_style.begins_with_i.required_prefix = I
+dotnet_naming_style.begins_with_i.capitalization = pascal_case
+
+# Types should be PascalCase
+dotnet_naming_rule.types_should_be_pascal_case.severity = suggestion
+dotnet_naming_rule.types_should_be_pascal_case.symbols = types +dotnet_naming_rule.types_should_be_pascal_case.style = pascal_case + +dotnet_naming_symbols.types.applicable_kinds = class, struct, interface, enum +dotnet_naming_symbols.types.applicable_accessibilities = public, internal, private, protected, protected_internal + +dotnet_naming_style.pascal_case.capitalization = pascal_case + +# Non-field members should be PascalCase +dotnet_naming_rule.non_field_members_should_be_pascal_case.severity = suggestion +dotnet_naming_rule.non_field_members_should_be_pascal_case.symbols = non_field_members +dotnet_naming_rule.non_field_members_should_be_pascal_case.style = pascal_case + +dotnet_naming_symbols.non_field_members.applicable_kinds = property, event, method +dotnet_naming_symbols.non_field_members.applicable_accessibilities = public, internal, private, protected, protected_internal diff --git a/examples/AdminClient/Program.cs b/examples/AdminClient/Program.cs index e7330ba5b..7b3f101ec 100644 --- a/examples/AdminClient/Program.cs +++ b/examples/AdminClient/Program.cs @@ -92,7 +92,7 @@ static async Task CreateTopicAsync(string bootstrapServers, string[] commandArgs { try { - await adminClient.CreateTopicsAsync(new TopicSpecification[] { + await adminClient.CreateTopicsAsync(new TopicSpecification[] { new TopicSpecification { Name = topicName, ReplicationFactor = 1, NumPartitions = 1 } }); } catch (CreateTopicsException e) @@ -168,23 +168,24 @@ static List ParseUserScramCredentialAlterations( { if (args.Length == 0) { - Console.WriteLine("usage: .. alter-user-scram-alterations " + + Console.WriteLine("usage: .. alter-user-scram-alterations " + "UPSERT " + "[UPSERT " + "DELETE ..]"); Environment.ExitCode = 1; return null; } - + var alterations = new List(); - for (int i = 0; i < args.Length;) { + for (int i = 0; i < args.Length;) + { string alterationName = args[i]; if (alterationName == "UPSERT") { if (i + 5 >= args.Length) { throw new ArgumentException( - $"invalid number of arguments for alteration {alterations.Count},"+ + $"invalid number of arguments for alteration {alterations.Count}," + $" expected 5, got {args.Length - i - 1}"); } @@ -218,7 +219,7 @@ static List ParseUserScramCredentialAlterations( if (i + 2 >= args.Length) { throw new ArgumentException( - $"invalid number of arguments for alteration {alterations.Count},"+ + $"invalid number of arguments for alteration {alterations.Count}," + $" expected 2, got {args.Length - i - 1}"); } @@ -246,34 +247,34 @@ static Tuple> ParseListOffsetsArg { if (args.Length == 0) { - Console.WriteLine("usage: .. list-offsets " + + Console.WriteLine("usage: .. 
list-offsets " + " .."); Environment.ExitCode = 1; return null; } - + var isolationLevel = Enum.Parse(args[0]); var topicPartitionOffsetSpecs = new List(); for (int i = 1; i < args.Length;) { - if (args.Length < i+3) + if (args.Length < i + 3) { throw new ArgumentException($"Invalid number of arguments for topicPartitionOffsetSpec[{topicPartitionOffsetSpecs.Count}]: {args.Length - i}"); } - + string topic = args[i]; var partition = Int32.Parse(args[i + 1]); var offsetSpec = args[i + 2]; if (offsetSpec == "TIMESTAMP") { - if (args.Length < i+4) + if (args.Length < i + 4) { throw new ArgumentException($"Invalid number of arguments for topicPartitionOffsetSpec[{topicPartitionOffsetSpecs.Count}]: {args.Length - i}"); } - + var timestamp = Int64.Parse(args[i + 3]); i = i + 1; - topicPartitionOffsetSpecs.Add( new TopicPartitionOffsetSpec + topicPartitionOffsetSpecs.Add(new TopicPartitionOffsetSpec { TopicPartition = new TopicPartition(topic, new Partition(partition)), OffsetSpec = OffsetSpec.ForTimestamp(timestamp) @@ -281,7 +282,7 @@ static Tuple> ParseListOffsetsArg } else if (offsetSpec == "MAX_TIMESTAMP") { - topicPartitionOffsetSpecs.Add( new TopicPartitionOffsetSpec + topicPartitionOffsetSpecs.Add(new TopicPartitionOffsetSpec { TopicPartition = new TopicPartition(topic, new Partition(partition)), OffsetSpec = OffsetSpec.MaxTimestamp() @@ -289,7 +290,7 @@ static Tuple> ParseListOffsetsArg } else if (offsetSpec == "EARLIEST") { - topicPartitionOffsetSpecs.Add( new TopicPartitionOffsetSpec + topicPartitionOffsetSpecs.Add(new TopicPartitionOffsetSpec { TopicPartition = new TopicPartition(topic, new Partition(partition)), OffsetSpec = OffsetSpec.Earliest() @@ -297,7 +298,7 @@ static Tuple> ParseListOffsetsArg } else if (offsetSpec == "LATEST") { - topicPartitionOffsetSpecs.Add( new TopicPartitionOffsetSpec + topicPartitionOffsetSpecs.Add(new TopicPartitionOffsetSpec { TopicPartition = new TopicPartition(topic, new Partition(partition)), OffsetSpec = OffsetSpec.Latest() @@ -313,9 +314,9 @@ static Tuple> ParseListOffsetsArg return Tuple.Create(isolationLevel, topicPartitionOffsetSpecs); } - static void PrintListOffsetsResultInfos(List ListOffsetsResultInfos) - { - foreach(var listOffsetsResultInfo in ListOffsetsResultInfos) + static void PrintListOffsetsResultInfos(List ListOffsetsResultInfos) + { + foreach (var listOffsetsResultInfo in ListOffsetsResultInfos) { Console.WriteLine(" ListOffsetsResultInfo:"); Console.WriteLine($" TopicPartitionOffsetError: {listOffsetsResultInfo.TopicPartitionOffsetError}"); @@ -493,7 +494,7 @@ static async Task AlterConsumerGroupOffsetsAsync(string bootstrapServers, string { var results = await adminClient.AlterConsumerGroupOffsetsAsync(input); Console.WriteLine("Successfully altered offsets:"); - foreach(var groupResult in results) + foreach (var groupResult in results) { Console.WriteLine(groupResult); } @@ -542,7 +543,7 @@ static async Task ListConsumerGroupOffsetsAsync(string bootstrapServers, string[ return; } } - if(!tpes.Any()) + if (!tpes.Any()) { // In case the list is empty, request offsets for all the partitions. 
tpes = null; @@ -556,7 +557,7 @@ static async Task ListConsumerGroupOffsetsAsync(string bootstrapServers, string[ { var result = await adminClient.ListConsumerGroupOffsetsAsync(input); Console.WriteLine("Successfully listed offsets:"); - foreach(var groupResult in result) + foreach (var groupResult in result) { Console.WriteLine(groupResult); } @@ -608,7 +609,7 @@ static async Task ListConsumerGroupsAsync(string bootstrapServers, string[] comm try { var result = await adminClient.ListConsumerGroupsAsync(new ListConsumerGroupsOptions() - { + { RequestTimeout = timeout, MatchStates = statesList, }); @@ -640,7 +641,7 @@ static async Task DescribeConsumerGroupsAsync(string bootstrapServers, string[] var password = commandArgs[1]; var includeAuthorizedOperations = (commandArgs[2] == "1"); var groupNames = commandArgs.Skip(3).ToList(); - + if (string.IsNullOrWhiteSpace(username)) { username = null; @@ -671,7 +672,7 @@ static async Task DescribeConsumerGroupsAsync(string bootstrapServers, string[] { try { - var descResult = await adminClient.DescribeConsumerGroupsAsync(groupNames, new DescribeConsumerGroupsOptions() { RequestTimeout = timeout , IncludeAuthorizedOperations = includeAuthorizedOperations}); + var descResult = await adminClient.DescribeConsumerGroupsAsync(groupNames, new DescribeConsumerGroupsOptions() { RequestTimeout = timeout, IncludeAuthorizedOperations = includeAuthorizedOperations }); foreach (var group in descResult.ConsumerGroupDescriptions) { Console.WriteLine($"\n Group: {group.GroupId} {group.Error}"); @@ -705,7 +706,7 @@ static async Task DescribeConsumerGroupsAsync(string bootstrapServers, string[] } } } - + static async Task IncrementalAlterConfigsAsync(string bootstrapServers, string[] commandArgs) { var timeout = TimeSpan.FromSeconds(30); @@ -720,8 +721,8 @@ static async Task IncrementalAlterConfigsAsync(string bootstrapServers, string[] { throw new ArgumentException("invalid arguments length"); } - - for (int i = 1; i < commandArgs.Length; i+=3) + + for (int i = 1; i < commandArgs.Length; i += 3) { var resourceType = Enum.Parse(commandArgs[i]); var resourceName = commandArgs[i + 1]; @@ -734,14 +735,14 @@ static async Task IncrementalAlterConfigsAsync(string bootstrapServers, string[] { throw new ArgumentException($"invalid alteration name \"{config}\""); } - + var name = nameOpValue[0]; var opValue = nameOpValue[1].Split(":"); if (opValue.Length != 2) { throw new ArgumentException($"invalid alteration value \"{nameOpValue[1]}\""); } - + var op = Enum.Parse(opValue[0]); var value = opValue[1]; configList.Add(new ConfigEntry @@ -759,7 +760,7 @@ static async Task IncrementalAlterConfigsAsync(string bootstrapServers, string[] configResourceList[resource] = configList; } } - catch (Exception e) when ( + catch (Exception e) when ( e is ArgumentException || e is FormatException ) @@ -769,7 +770,7 @@ e is FormatException Environment.ExitCode = 1; return; } - + using (var adminClient = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build()) { try @@ -797,7 +798,7 @@ e is FormatException } static async Task DescribeUserScramCredentialsAsync(string bootstrapServers, string[] commandArgs) - { + { var users = commandArgs.ToList(); var timeout = TimeSpan.FromSeconds(30); using (var adminClient = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build()) @@ -874,16 +875,17 @@ await adminClient.AlterUserScramCredentialsAsync(alterations, } } - static async Task ListOffsetsAsync(string bootstrapServers, 
string[] commandArgs) { + static async Task ListOffsetsAsync(string bootstrapServers, string[] commandArgs) + { var listOffsetsArgs = ParseListOffsetsArgs(commandArgs); if (listOffsetsArgs == null) { return; } - + var isolationLevel = listOffsetsArgs.Item1; var topicPartitionOffsets = listOffsetsArgs.Item2; - + var timeout = TimeSpan.FromSeconds(30); - ListOffsetsOptions options = new ListOffsetsOptions(){ RequestTimeout = timeout, IsolationLevel = isolationLevel }; + ListOffsetsOptions options = new ListOffsetsOptions() { RequestTimeout = timeout, IsolationLevel = isolationLevel }; using (var adminClient = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build()) { @@ -916,7 +918,7 @@ static void PrintTopicDescriptions(List topicDescriptions, boo foreach (var partition in topic.Partitions) { Console.WriteLine($" Partition ID: {partition.Partition} with leader: {partition.Leader}"); - if(!partition.ISR.Any()) + if (!partition.ISR.Any()) { Console.WriteLine(" There is no In-Sync-Replica broker for the partition"); } @@ -926,7 +928,7 @@ static void PrintTopicDescriptions(List topicDescriptions, boo Console.WriteLine($" The In-Sync-Replica brokers are: {isrs}"); } - if(!partition.Replicas.Any()) + if (!partition.Replicas.Any()) { Console.WriteLine(" There is no Replica broker for the partition"); } @@ -935,7 +937,7 @@ static void PrintTopicDescriptions(List topicDescriptions, boo string replicas = string.Join("; ", partition.Replicas); Console.WriteLine($" The Replica brokers are: {replicas}"); } - + } Console.WriteLine($" Is internal: {topic.IsInternal}"); if (includeAuthorizedOperations) @@ -954,7 +956,7 @@ static async Task DescribeTopicsAsync(string bootstrapServers, string[] commandA Environment.ExitCode = 1; return; } - + var username = commandArgs[0]; var password = commandArgs[1]; var includeAuthorizedOperations = (commandArgs[2] == "1"); @@ -991,7 +993,7 @@ static async Task DescribeTopicsAsync(string bootstrapServers, string[] commandA { var descResult = await adminClient.DescribeTopicsAsync( TopicCollection.OfTopicNames(topicNames), - new DescribeTopicsOptions() { RequestTimeout = timeout , IncludeAuthorizedOperations = includeAuthorizedOperations}); + new DescribeTopicsOptions() { RequestTimeout = timeout, IncludeAuthorizedOperations = includeAuthorizedOperations }); PrintTopicDescriptions(descResult.TopicDescriptions, includeAuthorizedOperations); } catch (DescribeTopicsException e) @@ -1041,11 +1043,11 @@ static async Task DescribeClusterAsync(string bootstrapServers, string[] command { try { - var descResult = await adminClient.DescribeClusterAsync(new DescribeClusterOptions() { RequestTimeout = timeout , IncludeAuthorizedOperations = includeAuthorizedOperations}); - + var descResult = await adminClient.DescribeClusterAsync(new DescribeClusterOptions() { RequestTimeout = timeout, IncludeAuthorizedOperations = includeAuthorizedOperations }); + Console.WriteLine($" Cluster Id: {descResult.ClusterId}\n Controller: {descResult.Controller}"); Console.WriteLine(" Nodes:"); - foreach(var node in descResult.Nodes) + foreach (var node in descResult.Nodes) { Console.WriteLine($" {node}"); } @@ -1072,7 +1074,7 @@ public static async Task Main(string[] args) "list-groups", "metadata", "library-version", "create-topic", "create-acls", "list-consumer-groups", "describe-consumer-groups", "list-consumer-group-offsets", "alter-consumer-group-offsets", - "incremental-alter-configs", "describe-user-scram-credentials", + "incremental-alter-configs", 
"describe-user-scram-credentials", "alter-user-scram-credentials", "describe-topics", "describe-cluster", "list-offsets" }) + diff --git a/examples/AvroBlogExamples/Program.cs b/examples/AvroBlogExamples/Program.cs index 2e13b09bb..6d2074630 100644 --- a/examples/AvroBlogExamples/Program.cs +++ b/examples/AvroBlogExamples/Program.cs @@ -1,174 +1,175 @@ -// Copyright 2018 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using Avro.Generic; -using Confluent.Kafka; -using Confluent.Kafka.SyncOverAsync; -using Confluent.SchemaRegistry.Serdes; -using Confluent.SchemaRegistry; -using System; -using System.Collections.Generic; -using System.IO; -using System.Threading; -using System.Threading.Tasks; - - -namespace AvroBlogExample -{ - /// - /// Complete source for the examples programs presented in the blog post: - /// https://www.confluent.io/blog/decoupling-systems-with-apache-kafka-schema-registry-and-avro/ - /// - class Program - { - async static Task ProduceGeneric(string bootstrapServers, string schemaRegistryUrl) - { - using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = schemaRegistryUrl })) - using (var producer = - new ProducerBuilder(new ProducerConfig { BootstrapServers = bootstrapServers }) - .SetValueSerializer(new AvroSerializer(schemaRegistry)) - .Build()) - { - var logLevelSchema = (Avro.EnumSchema)Avro.Schema.Parse( - File.ReadAllText("LogLevel.asvc")); - - var logMessageSchema = (Avro.RecordSchema)Avro.Schema - .Parse(File.ReadAllText("LogMessage.V1.asvc") - .Replace( - "MessageTypes.LogLevel", - File.ReadAllText("LogLevel.asvc"))); - - var record = new GenericRecord(logMessageSchema); - record.Add("IP", "127.0.0.1"); - record.Add("Message", "a test log message"); - record.Add("Severity", new GenericEnum(logLevelSchema, "Error")); - await producer - .ProduceAsync("log-messages", new Message { Value = record }) - .ContinueWith(task => Console.WriteLine( - task.IsFaulted - ? 
$"error producing message: {task.Exception.Message}" - : $"produced to: {task.Result.TopicPartitionOffset}")); - - producer.Flush(TimeSpan.FromSeconds(30)); - } - } - - async static Task ProduceSpecific(string bootstrapServers, string schemaRegistryUrl) - { - using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = schemaRegistryUrl })) - using (var producer = - new ProducerBuilder(new ProducerConfig { BootstrapServers = bootstrapServers }) - .SetValueSerializer(new AvroSerializer(schemaRegistry)) - .Build()) - { - await producer.ProduceAsync("log-messages", - new Message - { - Value = new MessageTypes.LogMessage - { - IP = "192.168.0.1", - Message = "a test message 2", - Severity = MessageTypes.LogLevel.Info, - Tags = new Dictionary { { "location", "CA" } } - } - }); - - producer.Flush(TimeSpan.FromSeconds(30)); - } - } - - static void ConsumeSpecific(string bootstrapServers, string schemaRegistryUrl) - { - CancellationTokenSource cts = new CancellationTokenSource(); - Console.CancelKeyPress += (_, e) => { - e.Cancel = true; // prevent the process from terminating. - cts.Cancel(); - }; - - var consumerConfig = new ConsumerConfig - { - GroupId = Guid.NewGuid().ToString(), - BootstrapServers = bootstrapServers, - AutoOffsetReset = AutoOffsetReset.Earliest - }; - - using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = schemaRegistryUrl })) - using (var consumer = - new ConsumerBuilder(consumerConfig) - .SetValueDeserializer(new AvroDeserializer(schemaRegistry).AsSyncOverAsync()) - .Build()) - { - consumer.Subscribe("log-messages"); - - try - { - while (true) - { - try - { - var consumeResult = consumer.Consume(cts.Token); - - Console.WriteLine( - consumeResult.Message.Timestamp.UtcDateTime.ToString("yyyy-MM-dd HH:mm:ss") - + $": [{consumeResult.Message.Value.Severity}] {consumeResult.Message.Value.Message}"); - } - catch (ConsumeException e) - { - Console.WriteLine($"an error occured: {e.Error.Reason}"); - } - } - } - catch (OperationCanceledException) - { - // commit final offsets and leave the group. - consumer.Close(); - } - } - } - - private static void PrintUsage() - => Console.WriteLine("Usage: .. "); - - static async Task Main(string[] args) - { - if (args.Length != 3) - { - PrintUsage(); - return; - } - - var mode = args[0]; - var bootstrapServers = args[1]; - var schemaRegistryUrl = args[2]; - - switch (mode) - { - case "generic-produce": - await ProduceGeneric(bootstrapServers, schemaRegistryUrl); - break; - case "specific-produce": - await ProduceSpecific(bootstrapServers, schemaRegistryUrl); - break; - case "consume": - ConsumeSpecific(bootstrapServers, schemaRegistryUrl); - break; - default: - PrintUsage(); - break; - } - } - } -} +// Copyright 2018 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. 
+ +using Avro.Generic; +using Confluent.Kafka; +using Confluent.Kafka.SyncOverAsync; +using Confluent.SchemaRegistry.Serdes; +using Confluent.SchemaRegistry; +using System; +using System.Collections.Generic; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + + +namespace AvroBlogExample +{ + /// + /// Complete source for the examples programs presented in the blog post: + /// https://www.confluent.io/blog/decoupling-systems-with-apache-kafka-schema-registry-and-avro/ + /// + class Program + { + async static Task ProduceGeneric(string bootstrapServers, string schemaRegistryUrl) + { + using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = schemaRegistryUrl })) + using (var producer = + new ProducerBuilder(new ProducerConfig { BootstrapServers = bootstrapServers }) + .SetValueSerializer(new AvroSerializer(schemaRegistry)) + .Build()) + { + var logLevelSchema = (Avro.EnumSchema)Avro.Schema.Parse( + File.ReadAllText("LogLevel.asvc")); + + var logMessageSchema = (Avro.RecordSchema)Avro.Schema + .Parse(File.ReadAllText("LogMessage.V1.asvc") + .Replace( + "MessageTypes.LogLevel", + File.ReadAllText("LogLevel.asvc"))); + + var record = new GenericRecord(logMessageSchema); + record.Add("IP", "127.0.0.1"); + record.Add("Message", "a test log message"); + record.Add("Severity", new GenericEnum(logLevelSchema, "Error")); + await producer + .ProduceAsync("log-messages", new Message { Value = record }) + .ContinueWith(task => Console.WriteLine( + task.IsFaulted + ? $"error producing message: {task.Exception.Message}" + : $"produced to: {task.Result.TopicPartitionOffset}")); + + producer.Flush(TimeSpan.FromSeconds(30)); + } + } + + async static Task ProduceSpecific(string bootstrapServers, string schemaRegistryUrl) + { + using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = schemaRegistryUrl })) + using (var producer = + new ProducerBuilder(new ProducerConfig { BootstrapServers = bootstrapServers }) + .SetValueSerializer(new AvroSerializer(schemaRegistry)) + .Build()) + { + await producer.ProduceAsync("log-messages", + new Message + { + Value = new MessageTypes.LogMessage + { + IP = "192.168.0.1", + Message = "a test message 2", + Severity = MessageTypes.LogLevel.Info, + Tags = new Dictionary { { "location", "CA" } } + } + }); + + producer.Flush(TimeSpan.FromSeconds(30)); + } + } + + static void ConsumeSpecific(string bootstrapServers, string schemaRegistryUrl) + { + CancellationTokenSource cts = new CancellationTokenSource(); + Console.CancelKeyPress += (_, e) => + { + e.Cancel = true; // prevent the process from terminating. 
+ cts.Cancel(); + }; + + var consumerConfig = new ConsumerConfig + { + GroupId = Guid.NewGuid().ToString(), + BootstrapServers = bootstrapServers, + AutoOffsetReset = AutoOffsetReset.Earliest + }; + + using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = schemaRegistryUrl })) + using (var consumer = + new ConsumerBuilder(consumerConfig) + .SetValueDeserializer(new AvroDeserializer(schemaRegistry).AsSyncOverAsync()) + .Build()) + { + consumer.Subscribe("log-messages"); + + try + { + while (true) + { + try + { + var consumeResult = consumer.Consume(cts.Token); + + Console.WriteLine( + consumeResult.Message.Timestamp.UtcDateTime.ToString("yyyy-MM-dd HH:mm:ss") + + $": [{consumeResult.Message.Value.Severity}] {consumeResult.Message.Value.Message}"); + } + catch (ConsumeException e) + { + Console.WriteLine($"an error occured: {e.Error.Reason}"); + } + } + } + catch (OperationCanceledException) + { + // commit final offsets and leave the group. + consumer.Close(); + } + } + } + + private static void PrintUsage() + => Console.WriteLine("Usage: .. "); + + static async Task Main(string[] args) + { + if (args.Length != 3) + { + PrintUsage(); + return; + } + + var mode = args[0]; + var bootstrapServers = args[1]; + var schemaRegistryUrl = args[2]; + + switch (mode) + { + case "generic-produce": + await ProduceGeneric(bootstrapServers, schemaRegistryUrl); + break; + case "specific-produce": + await ProduceSpecific(bootstrapServers, schemaRegistryUrl); + break; + case "consume": + ConsumeSpecific(bootstrapServers, schemaRegistryUrl); + break; + default: + PrintUsage(); + break; + } + } + } +} diff --git a/examples/AvroGeneric/Program.cs b/examples/AvroGeneric/Program.cs index b6a9a39f4..e7f0bc1c4 100644 --- a/examples/AvroGeneric/Program.cs +++ b/examples/AvroGeneric/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/examples/AvroGenericEncryption/Program.cs b/examples/AvroGenericEncryption/Program.cs index a8b966f83..0dffe13ea 100644 --- a/examples/AvroGenericEncryption/Program.cs +++ b/examples/AvroGenericEncryption/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2024 Confluent Inc. +// Copyright 2024 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -143,7 +143,7 @@ static async Task Main(string[] args) .Build()) { schemaRegistry.RegisterSchemaAsync(subjectName, schema, true); - + Console.WriteLine($"{producer.Name} producing on {topicName}. Enter user names, q to exit."); long i = 1; diff --git a/examples/AvroGenericMigration/Program.cs b/examples/AvroGenericMigration/Program.cs index e9ce5ed57..71bd3eb07 100644 --- a/examples/AvroGenericMigration/Program.cs +++ b/examples/AvroGenericMigration/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2024 Confluent Inc. +// Copyright 2024 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/examples/AvroSpecific/Program.cs b/examples/AvroSpecific/Program.cs index 241b0cb28..71b8ecadc 100644 --- a/examples/AvroSpecific/Program.cs +++ b/examples/AvroSpecific/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/examples/AvroSpecificEncryption/Program.cs b/examples/AvroSpecificEncryption/Program.cs index 96e6245b0..a730654aa 100644 --- a/examples/AvroSpecificEncryption/Program.cs +++ b/examples/AvroSpecificEncryption/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2024 Confluent Inc. +// Copyright 2024 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ static void Main(string[] args) Console.WriteLine("Usage: .. bootstrapServers schemaRegistryUrl topicName kekName kmsType kmsKeyId"); return; } - + // Register the KMS drivers and the field encryption executor AwsKmsDriver.Register(); AzureKmsDriver.Register(); @@ -146,7 +146,7 @@ static void Main(string[] args) .Build()) { schemaRegistry.RegisterSchemaAsync(subjectName, schema, true); - + Console.WriteLine($"{producer.Name} producing on {topicName}. Enter user names, q to exit."); int i = 1; diff --git a/examples/ConfluentCloud/Program.cs b/examples/ConfluentCloud/Program.cs index c8964bcd6..d693292d4 100644 --- a/examples/ConfluentCloud/Program.cs +++ b/examples/ConfluentCloud/Program.cs @@ -1,93 +1,93 @@ -// Copyright 2018 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using Confluent.Kafka; -using System; - - -namespace ConfluentCloudExample -{ - /// - /// This is a simple example demonstrating how to produce a message to - /// Confluent Cloud then read it back again. - /// - /// https://www.confluent.io/confluent-cloud/ - /// - /// Confluent Cloud does not auto-create topics. You will need to use the ccloud - /// cli to create the dotnet-test-topic topic before running this example. The - /// , and parameters are - /// available via the confluent cloud web interface. For more information, - /// refer to the quick-start: - /// - /// https://docs.confluent.io/current/cloud-quickstart.html - /// - class Program - { - static void Main(string[] args) - { - var pConfig = new ProducerConfig - { - BootstrapServers = "", - SaslMechanism = SaslMechanism.Plain, - SecurityProtocol = SecurityProtocol.SaslSsl, - // Note: If your root CA certificates are in an unusual location you - // may need to specify this using the SslCaLocation property. - SaslUsername = "", - SaslPassword = "" - }; - - using (var producer = new ProducerBuilder(pConfig).Build()) - { - producer.ProduceAsync("dotnet-test-topic", new Message { Value = "test value" }) - .ContinueWith(task => task.IsFaulted - ? $"error producing message: {task.Exception.Message}" - : $"produced to: {task.Result.TopicPartitionOffset}"); - - // block until all in-flight produce requests have completed (successfully - // or otherwise) or 10s has elapsed. 
- producer.Flush(TimeSpan.FromSeconds(10)); - } - - var cConfig = new ConsumerConfig - { - BootstrapServers = "", - SaslMechanism = SaslMechanism.Plain, - SecurityProtocol = SecurityProtocol.SaslSsl, - SaslUsername = "", - SaslPassword = "", - GroupId = Guid.NewGuid().ToString(), - AutoOffsetReset = AutoOffsetReset.Earliest - }; - - using (var consumer = new ConsumerBuilder(cConfig).Build()) - { - consumer.Subscribe("dotnet-test-topic"); - - try - { - var consumeResult = consumer.Consume(); - Console.WriteLine($"consumed: {consumeResult.Message.Value}"); - } - catch (ConsumeException e) - { - Console.WriteLine($"consume error: {e.Error.Reason}"); - } - - consumer.Close(); - } - } - } -} +// Copyright 2018 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +using Confluent.Kafka; +using System; + + +namespace ConfluentCloudExample +{ + /// + /// This is a simple example demonstrating how to produce a message to + /// Confluent Cloud then read it back again. + /// + /// https://www.confluent.io/confluent-cloud/ + /// + /// Confluent Cloud does not auto-create topics. You will need to use the ccloud + /// cli to create the dotnet-test-topic topic before running this example. The + /// , and parameters are + /// available via the confluent cloud web interface. For more information, + /// refer to the quick-start: + /// + /// https://docs.confluent.io/current/cloud-quickstart.html + /// + class Program + { + static void Main(string[] args) + { + var pConfig = new ProducerConfig + { + BootstrapServers = "", + SaslMechanism = SaslMechanism.Plain, + SecurityProtocol = SecurityProtocol.SaslSsl, + // Note: If your root CA certificates are in an unusual location you + // may need to specify this using the SslCaLocation property. + SaslUsername = "", + SaslPassword = "" + }; + + using (var producer = new ProducerBuilder(pConfig).Build()) + { + producer.ProduceAsync("dotnet-test-topic", new Message { Value = "test value" }) + .ContinueWith(task => task.IsFaulted + ? $"error producing message: {task.Exception.Message}" + : $"produced to: {task.Result.TopicPartitionOffset}"); + + // block until all in-flight produce requests have completed (successfully + // or otherwise) or 10s has elapsed. 
+ producer.Flush(TimeSpan.FromSeconds(10)); + } + + var cConfig = new ConsumerConfig + { + BootstrapServers = "", + SaslMechanism = SaslMechanism.Plain, + SecurityProtocol = SecurityProtocol.SaslSsl, + SaslUsername = "", + SaslPassword = "", + GroupId = Guid.NewGuid().ToString(), + AutoOffsetReset = AutoOffsetReset.Earliest + }; + + using (var consumer = new ConsumerBuilder(cConfig).Build()) + { + consumer.Subscribe("dotnet-test-topic"); + + try + { + var consumeResult = consumer.Consume(); + Console.WriteLine($"consumed: {consumeResult.Message.Value}"); + } + catch (ConsumeException e) + { + Console.WriteLine($"consume error: {e.Error.Reason}"); + } + + consumer.Close(); + } + } + } +} diff --git a/examples/Consumer/Program.cs b/examples/Consumer/Program.cs index 00b5d490c..10a0a7727 100644 --- a/examples/Consumer/Program.cs +++ b/examples/Consumer/Program.cs @@ -212,7 +212,8 @@ public static void Main(string[] args) Console.WriteLine($"Started consumer, Ctrl-C to stop consuming"); CancellationTokenSource cts = new CancellationTokenSource(); - Console.CancelKeyPress += (_, e) => { + Console.CancelKeyPress += (_, e) => + { e.Cancel = true; // prevent the process from terminating. cts.Cancel(); }; diff --git a/examples/ExactlyOnce/Program.cs b/examples/ExactlyOnce/Program.cs index 6cb82280b..28d0217ce 100644 --- a/examples/ExactlyOnce/Program.cs +++ b/examples/ExactlyOnce/Program.cs @@ -325,7 +325,8 @@ static void Processor_MapWords(string brokerList, string clientId, CancellationT using (var producer = new ProducerBuilder(pConfig).Build()) using (var consumer = new ConsumerBuilder(cConfig) - .SetPartitionsRevokedHandler((c, partitions) => { + .SetPartitionsRevokedHandler((c, partitions) => + { var remaining = c.Assignment.Where(tp => partitions.Where(x => x.TopicPartition == tp).Count() == 0); Console.WriteLine( "** MapWords consumer group partitions revoked: [" + @@ -351,7 +352,8 @@ static void Processor_MapWords(string brokerList, string clientId, CancellationT producer.BeginTransaction(); }) - .SetPartitionsLostHandler((c, partitions) => { + .SetPartitionsLostHandler((c, partitions) => + { // Ownership of the partitions has been involuntarily lost and // are now likely already owned by another consumer. 
@@ -364,7 +366,8 @@ static void Processor_MapWords(string brokerList, string clientId, CancellationT producer.BeginTransaction(); }) - .SetPartitionsAssignedHandler((c, partitions) => { + .SetPartitionsAssignedHandler((c, partitions) => + { Console.WriteLine( "** MapWords consumer group additional partitions assigned: [" + string.Join(',', partitions.Select(p => p.Partition.Value)) + @@ -574,7 +577,8 @@ public static void Processor_AggregateWords(string brokerList, string clientId, using (var producer = new ProducerBuilder(pConfig).Build()) using (var consumer = new ConsumerBuilder(cConfig) - .SetPartitionsRevokedHandler((c, partitions) => { + .SetPartitionsRevokedHandler((c, partitions) => + { var remaining = c.Assignment.Where(tp => partitions.Where(x => x.TopicPartition == tp).Count() == 0); Console.WriteLine( "** AggregateWords consumer group partitions revoked: [" + @@ -598,7 +602,8 @@ public static void Processor_AggregateWords(string brokerList, string clientId, producer.BeginTransaction(); }) - .SetPartitionsLostHandler((c, partitions) => { + .SetPartitionsLostHandler((c, partitions) => + { Console.WriteLine( "** AggregateWords consumer group partitions lost: [" + string.Join(',', partitions.Select(p => p.Partition.Value)) + @@ -615,7 +620,8 @@ public static void Processor_AggregateWords(string brokerList, string clientId, producer.BeginTransaction(); }) - .SetPartitionsAssignedHandler((c, partitions) => { + .SetPartitionsAssignedHandler((c, partitions) => + { Console.WriteLine( "** AggregateWords consumer group partition assigned: [" + string.Join(',', partitions.Select(p => p.Partition.Value)) + @@ -710,11 +716,11 @@ public async static Task PeriodicallyDisplayTopCountsState(string brokerList, Ca var store = kvp.Value; var itr = store.Store.Iterate(); - while(itr.GetNext(out var recordInfo)) + while (itr.GetNext(out var recordInfo)) { var wc = (itr.GetValue(), itr.GetKey()); if (maxWords.Count < N) { maxWords.Add(wc); } - else { if (wc.Item1 > maxWords[N-1].Item1) { maxWords[N-1] = wc; } } + else { if (wc.Item1 > maxWords[N - 1].Item1) { maxWords[N - 1] = wc; } } maxWords.Sort((x, y) => y.Item1.CompareTo(x.Item1)); } } @@ -749,7 +755,8 @@ public static async Task Main(string[] args) string clientId = args.Length > 2 ? args[2] : null; CancellationTokenSource cts = new CancellationTokenSource(); - Console.CancelKeyPress += (_, e) => { + Console.CancelKeyPress += (_, e) => + { e.Cancel = true; // prevent the process from terminating. cts.Cancel(); }; @@ -767,12 +774,12 @@ public static async Task Main(string[] args) case "gen": try { await Generator_LineInputData(brokerList, clientId, cts.Token); } - catch (OperationCanceledException) {} + catch (OperationCanceledException) { } return; case "map": try { Processor_MapWords(brokerList, clientId, cts.Token); } - catch (OperationCanceledException) {} + catch (OperationCanceledException) { } return; case "reduce": diff --git a/examples/ExactlyOnceOldBroker/Program.cs b/examples/ExactlyOnceOldBroker/Program.cs index d50ec57ba..b9f575e37 100644 --- a/examples/ExactlyOnceOldBroker/Program.cs +++ b/examples/ExactlyOnceOldBroker/Program.cs @@ -39,9 +39,9 @@ namespace Confluent.Kafka.Examples.Transactions /// consumed from the corresponding input /// partition. 
/// - public class ProducerState + public class ProducerState { - public IProducer Producer { get; set; } + public IProducer Producer { get; set; } public Offset Offset { get; set; } } @@ -240,7 +240,8 @@ static void Processor_MapWords(string brokerList, string clientId, CancellationT var producerState = new Dictionary>(); using (var consumer = new ConsumerBuilder(cConfig) - .SetPartitionsRevokedHandler((c, partitions) => { + .SetPartitionsRevokedHandler((c, partitions) => + { // Note: All handlers (except the log handler) are executed // as a side-effect of, and on the same thread as the Consume // or Close methods. Any exception thrown in a handler (with @@ -256,7 +257,8 @@ static void Processor_MapWords(string brokerList, string clientId, CancellationT var tasks = new List(); foreach (var p in producerState.Values) { - tasks.Add(Task.Run(() => { + tasks.Add(Task.Run(() => + { p.Producer.AbortTransaction(DefaultTimeout); // Note: Not cancellable yet. p.Producer.Dispose(); }, ct)); @@ -268,7 +270,8 @@ static void Processor_MapWords(string brokerList, string clientId, CancellationT } producerState.Clear(); }) - .SetPartitionsAssignedHandler((c, partitions) => { + .SetPartitionsAssignedHandler((c, partitions) => + { Console.WriteLine( "** MapWords consumer group rebalanced. Partition assignment: [" + string.Join(',', partitions.Select(p => p.Partition.Value)) + @@ -281,7 +284,8 @@ static void Processor_MapWords(string brokerList, string clientId, CancellationT var tasks = new List(); foreach (var tp in partitions) { - tasks.Add(Task.Run(() => { + tasks.Add(Task.Run(() => + { var pConfig = new ProducerConfig { BootstrapServers = brokerList, @@ -485,7 +489,8 @@ public static void Processor_AggregateWords(string brokerList, string clientId, var producerState = new Dictionary>(); using (var consumer = new ConsumerBuilder(cConfig) - .SetPartitionsRevokedHandler((c, partitions) => { + .SetPartitionsRevokedHandler((c, partitions) => + { // clear rocksdb state. db.DropColumnFamily("counts"); db.CreateColumnFamily(new ColumnFamilyOptions(), "counts"); @@ -493,7 +498,8 @@ public static void Processor_AggregateWords(string brokerList, string clientId, var tasks = new List(); foreach (var p in producerState.Values) { - tasks.Add(Task.Run(() => { + tasks.Add(Task.Run(() => + { p.Producer.AbortTransaction(DefaultTimeout); // Note: Not cancellable yet. p.Producer.Dispose(); }, ct)); @@ -505,7 +511,8 @@ public static void Processor_AggregateWords(string brokerList, string clientId, Task.WaitAll(tasks.ToArray()); producerState.Clear(); }) - .SetPartitionsAssignedHandler((c, partitions) => { + .SetPartitionsAssignedHandler((c, partitions) => + { Console.WriteLine( "** AggregateWords consumer group rebalanced. Partition assignment: [" + string.Join(',', partitions.Select(p => p.Partition.Value)) + @@ -516,7 +523,8 @@ public static void Processor_AggregateWords(string brokerList, string clientId, var tasks = new List(); foreach (var tp in partitions) { - tasks.Add(Task.Run(() => { + tasks.Add(Task.Run(() => + { var pConfig = new ProducerConfig { BootstrapServers = brokerList, @@ -553,7 +561,7 @@ public static void Processor_AggregateWords(string brokerList, string clientId, var kBytes = Encoding.UTF8.GetBytes(cr.Message.Key); var vBytes = db.Get(kBytes, columnFamily); var v = vBytes == null ? 
0 : BitConverter.ToInt32(vBytes); - var updatedV = v+1; + var updatedV = v + 1; db.Put(kBytes, BitConverter.GetBytes(updatedV), columnFamily); @@ -628,7 +636,7 @@ public async static Task PeriodicallyDisplayTopCountsState(string brokerList, Ro { var wc = (BitConverter.ToInt32(it.Value()), Encoding.UTF8.GetString(it.Key())); if (maxWords.Count < N) { maxWords.Add(wc); } - else { if (wc.Item1 > maxWords[N-1].Item1) { maxWords[N-1] = wc; } } + else { if (wc.Item1 > maxWords[N - 1].Item1) { maxWords[N - 1] = wc; } } maxWords.Sort((x, y) => y.Item1.CompareTo(x.Item1)); it.Next(); } @@ -656,7 +664,8 @@ public static async Task Main(string[] args) string mode = args[1]; CancellationTokenSource cts = new CancellationTokenSource(); - Console.CancelKeyPress += (_, e) => { + Console.CancelKeyPress += (_, e) => + { e.Cancel = true; // prevent the process from terminating. cts.Cancel(); }; @@ -672,7 +681,7 @@ public static async Task Main(string[] args) if (mode == "gen") { try { await Generator_LineInputData(brokerList, cts.Token); } - catch (OperationCanceledException) {} + catch (OperationCanceledException) { } return; } diff --git a/examples/JsonEncryption/Program.cs b/examples/JsonEncryption/Program.cs index 5e6b072cb..f1ca76b57 100644 --- a/examples/JsonEncryption/Program.cs +++ b/examples/JsonEncryption/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2024 Confluent Inc. +// Copyright 2024 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -108,7 +108,7 @@ static async Task Main(string[] args) } } }"; - + var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers @@ -199,7 +199,7 @@ static async Task Main(string[] args) .Build()) { await schemaRegistry.RegisterSchemaAsync(subjectName, schema, true); - + Console.WriteLine($"{producer.Name} producing on {topicName}. Enter first names, q to exit."); long i = 1; @@ -207,11 +207,11 @@ static async Task Main(string[] args) while ((text = Console.ReadLine()) != "q") { User user = new User { Name = text, FavoriteColor = "blue", FavoriteNumber = i++ }; - try + try { await producer.ProduceAsync(topicName, new Message { Value = user }); } - catch (Exception e) + catch (Exception e) { Console.WriteLine($"error producing message: {e.Message}"); } diff --git a/examples/JsonSerialization/Program.cs b/examples/JsonSerialization/Program.cs index a84e51653..a74e1fa6f 100644 --- a/examples/JsonSerialization/Program.cs +++ b/examples/JsonSerialization/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2020 Confluent Inc. +// Copyright 2020 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -149,11 +149,11 @@ static async Task Main(string[] args) while ((text = Console.ReadLine()) != "q") { User user = new User { Name = text, FavoriteColor = "blue", FavoriteNumber = i++ }; - try + try { await producer.ProduceAsync(topicName, new Message { Value = user }); } - catch (Exception e) + catch (Exception e) { Console.WriteLine($"error producing message: {e.Message}"); } diff --git a/examples/JsonWithReferences/Program.cs b/examples/JsonWithReferences/Program.cs index 1578aec39..42ffb687d 100644 --- a/examples/JsonWithReferences/Program.cs +++ b/examples/JsonWithReferences/Program.cs @@ -1,253 +1,253 @@ -// Copyright 2023 Confluent Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using Confluent.Kafka.SyncOverAsync; -using Confluent.SchemaRegistry; -using Confluent.SchemaRegistry.Serdes; -using System; -using System.IO; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using NJsonSchema.Generation; -using Newtonsoft.Json; -using Newtonsoft.Json.Serialization; - - -/// -/// An example of working with JSON schemas with external, -/// references and Json data, Apache Kafka and -/// Confluent Schema Registry (v5.5 or later required for -/// JSON schema support). -/// -namespace Confluent.Kafka.Examples.JsonWithReferences -{ - - /// - /// The deserializer allows multiple ways to consume data. - /// - /// If the consumer is aware of the entire schema details, - /// they can create a class corresponding to it and use the - /// deserializer in these ways: - /// - without passing a schema, the deserializer will convert - /// the serialized string to the object of this class. - /// - pass a schema and allow validating against it. - /// - /// Note: The user can also pass JObject to the - /// ConsumerBuilder and JsonDeserializer - /// in order to get JObject instead in consumer, this is possible - /// in the producer too. - /// - public class Product - { - public long ProductId { get; set; } - - public string ProductName { get; set; } - - public decimal Price { get; set; } - - public List Tags { get; set; } - - public Dimensions Dimensions { get; set; } - - public GeographicalLocation WarehouseLocation { get; set; } - } - - public class Dimensions - { - public decimal Length { get; set; } - - public decimal Width { get; set; } - - public decimal Height { get; set; } - } - - public class GeographicalLocation - { - public decimal Latitude { get; set; } - - public decimal Longitude { get; set; } - } - - /// - /// Internally, the JSON serializer uses Newtonsoft.Json for - /// serialization and NJsonSchema for schema creation and - /// validation. - /// - class Program - { - // from: https://json-schema.org/learn/getting-started-step-by-step.html - private static string S1; - private static string S2; - static async Task Main(string[] args) - { - if (args.Length != 3) - { - Console.WriteLine("Usage: .. 
bootstrapServers schemaRegistryUrl topicName"); - return; - } - - S1 = File.ReadAllText("geographical-location.json"); - S2 = File.ReadAllText("product.json"); - string bootstrapServers = args[0]; - string schemaRegistryUrl = args[1]; - string topicName = args[2]; - - var consumerConfig = new ConsumerConfig - { - BootstrapServers = bootstrapServers, - GroupId = "json-example-consumer-group" - }; - - var producerConfig = new ProducerConfig - { - BootstrapServers = bootstrapServers - }; - - var schemaRegistryConfig = new SchemaRegistryConfig - { - Url = schemaRegistryUrl - }; - - var sr = new CachedSchemaRegistryClient(schemaRegistryConfig); - - var subject1 = $"{topicName}-CoordinatesOnMap"; - var subject2 = $"{topicName}-Product"; - - // Test there are no errors (exceptions) registering a schema that references another. - var id1 = sr.RegisterSchemaAsync(subject1, new Schema(S1, SchemaType.Json)).Result; - var s1 = sr.GetLatestSchemaAsync(subject1).Result; - var refs = new List { new SchemaReference("geographical-location.json", subject1, s1.Version) }; - var id2 = sr.RegisterSchemaAsync(subject2, new Schema(S2, refs, SchemaType.Json)).Result; - - // In fact, it seems references are not checked server side. - var latestSchema2 = sr.GetLatestSchemaAsync(subject2).Result; - var latestSchema2Unreg = latestSchema2.Schema; - var latestSchema1 = sr.GetLatestSchemaAsync(subject1).Result; - - var jsonSerializerConfig = new JsonSerializerConfig - { - BufferBytes = 100, - UseLatestVersion = true, - AutoRegisterSchemas = false, - SubjectNameStrategy = SubjectNameStrategy.TopicRecord - }; - - // This is needed only if you want to change attribute naming strategy - // from default one to camelCase. - // It's also possible to add JsonProperty attributes to customize - // serialization mapping and all available NJson attributes. - var jsonSchemaGeneratorSettings = new JsonSchemaGeneratorSettings - { - SerializerSettings = new JsonSerializerSettings - { - ContractResolver = new DefaultContractResolver - { - NamingStrategy = new CamelCaseNamingStrategy() - } - } - }; - - CancellationTokenSource cts = new CancellationTokenSource(); - - var consumeTask = Task.Run(() => - { - using (var consumer = - new ConsumerBuilder(consumerConfig) - .SetValueDeserializer(new JsonDeserializer(sr, latestSchema2Unreg, null, jsonSchemaGeneratorSettings).AsSyncOverAsync()) - .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}")) - .Build()) - { - consumer.Subscribe(topicName); - - try - { - while (true) - { - try - { - var cr = consumer.Consume(cts.Token); - var product = cr.Message.Value; - - Console.WriteLine("CONSUMER: product name " + product.ProductName + - $" Product id {product.ProductId} " + - $"Price: {product.Price} " + - $"Latitude: {product.WarehouseLocation.Latitude} " + - $"Longitude: {product.WarehouseLocation.Longitude}"); - } - catch (ConsumeException e) - { - Console.WriteLine($"Consume error: {e.Error.Reason}"); - } - } - } - catch (OperationCanceledException) - { - consumer.Close(); - } - } - }); - - using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) - using (var producer = - new ProducerBuilder(producerConfig) - .SetValueSerializer(new JsonSerializer(schemaRegistry, latestSchema2Unreg, - jsonSerializerConfig, jsonSchemaGeneratorSettings)) - .Build()) - { - Console.WriteLine($"PRODUCER: {producer.Name} producing on {topicName}. 
Enter product name, q to exit."); - - long i = 1; - string text; - while ((text = Console.ReadLine()) != "q") - { - var product = new Product - { - ProductId = i++, - ProductName = text, - Price = 9.99M, - Tags = new List { "tag1", "tag2" }, - Dimensions = new Dimensions - { - Length = 10.0M, - Width = 5.0M, - Height = 2.0M - }, - WarehouseLocation = new GeographicalLocation - { - Latitude = 37.7749M, - Longitude = -122.4194M - } - }; - try - { - await producer.ProduceAsync(topicName, new Message - { - Key = product.ProductId, - Value = product - }); - } - catch (Exception e) - { - Console.WriteLine($"error producing message: {e.Message}"); - } - Console.WriteLine($"{producer.Name} producing on {topicName}. Enter product name, q to exit."); - } - } - cts.Cancel(); - } - } -} +// Copyright 2023 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +using Confluent.Kafka.SyncOverAsync; +using Confluent.SchemaRegistry; +using Confluent.SchemaRegistry.Serdes; +using System; +using System.IO; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using NJsonSchema.Generation; +using Newtonsoft.Json; +using Newtonsoft.Json.Serialization; + + +/// +/// An example of working with JSON schemas with external, +/// references and Json data, Apache Kafka and +/// Confluent Schema Registry (v5.5 or later required for +/// JSON schema support). +/// +namespace Confluent.Kafka.Examples.JsonWithReferences +{ + + /// + /// The deserializer allows multiple ways to consume data. + /// + /// If the consumer is aware of the entire schema details, + /// they can create a class corresponding to it and use the + /// deserializer in these ways: + /// - without passing a schema, the deserializer will convert + /// the serialized string to the object of this class. + /// - pass a schema and allow validating against it. + /// + /// Note: The user can also pass JObject to the + /// ConsumerBuilder and JsonDeserializer + /// in order to get JObject instead in consumer, this is possible + /// in the producer too. + /// + public class Product + { + public long ProductId { get; set; } + + public string ProductName { get; set; } + + public decimal Price { get; set; } + + public List Tags { get; set; } + + public Dimensions Dimensions { get; set; } + + public GeographicalLocation WarehouseLocation { get; set; } + } + + public class Dimensions + { + public decimal Length { get; set; } + + public decimal Width { get; set; } + + public decimal Height { get; set; } + } + + public class GeographicalLocation + { + public decimal Latitude { get; set; } + + public decimal Longitude { get; set; } + } + + /// + /// Internally, the JSON serializer uses Newtonsoft.Json for + /// serialization and NJsonSchema for schema creation and + /// validation. 
+ /// + class Program + { + // from: https://json-schema.org/learn/getting-started-step-by-step.html + private static string S1; + private static string S2; + static async Task Main(string[] args) + { + if (args.Length != 3) + { + Console.WriteLine("Usage: .. bootstrapServers schemaRegistryUrl topicName"); + return; + } + + S1 = File.ReadAllText("geographical-location.json"); + S2 = File.ReadAllText("product.json"); + string bootstrapServers = args[0]; + string schemaRegistryUrl = args[1]; + string topicName = args[2]; + + var consumerConfig = new ConsumerConfig + { + BootstrapServers = bootstrapServers, + GroupId = "json-example-consumer-group" + }; + + var producerConfig = new ProducerConfig + { + BootstrapServers = bootstrapServers + }; + + var schemaRegistryConfig = new SchemaRegistryConfig + { + Url = schemaRegistryUrl + }; + + var sr = new CachedSchemaRegistryClient(schemaRegistryConfig); + + var subject1 = $"{topicName}-CoordinatesOnMap"; + var subject2 = $"{topicName}-Product"; + + // Test there are no errors (exceptions) registering a schema that references another. + var id1 = sr.RegisterSchemaAsync(subject1, new Schema(S1, SchemaType.Json)).Result; + var s1 = sr.GetLatestSchemaAsync(subject1).Result; + var refs = new List { new SchemaReference("geographical-location.json", subject1, s1.Version) }; + var id2 = sr.RegisterSchemaAsync(subject2, new Schema(S2, refs, SchemaType.Json)).Result; + + // In fact, it seems references are not checked server side. + var latestSchema2 = sr.GetLatestSchemaAsync(subject2).Result; + var latestSchema2Unreg = latestSchema2.Schema; + var latestSchema1 = sr.GetLatestSchemaAsync(subject1).Result; + + var jsonSerializerConfig = new JsonSerializerConfig + { + BufferBytes = 100, + UseLatestVersion = true, + AutoRegisterSchemas = false, + SubjectNameStrategy = SubjectNameStrategy.TopicRecord + }; + + // This is needed only if you want to change attribute naming strategy + // from default one to camelCase. + // It's also possible to add JsonProperty attributes to customize + // serialization mapping and all available NJson attributes. 
+ var jsonSchemaGeneratorSettings = new JsonSchemaGeneratorSettings + { + SerializerSettings = new JsonSerializerSettings + { + ContractResolver = new DefaultContractResolver + { + NamingStrategy = new CamelCaseNamingStrategy() + } + } + }; + + CancellationTokenSource cts = new CancellationTokenSource(); + + var consumeTask = Task.Run(() => + { + using (var consumer = + new ConsumerBuilder(consumerConfig) + .SetValueDeserializer(new JsonDeserializer(sr, latestSchema2Unreg, null, jsonSchemaGeneratorSettings).AsSyncOverAsync()) + .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}")) + .Build()) + { + consumer.Subscribe(topicName); + + try + { + while (true) + { + try + { + var cr = consumer.Consume(cts.Token); + var product = cr.Message.Value; + + Console.WriteLine("CONSUMER: product name " + product.ProductName + + $" Product id {product.ProductId} " + + $"Price: {product.Price} " + + $"Latitude: {product.WarehouseLocation.Latitude} " + + $"Longitude: {product.WarehouseLocation.Longitude}"); + } + catch (ConsumeException e) + { + Console.WriteLine($"Consume error: {e.Error.Reason}"); + } + } + } + catch (OperationCanceledException) + { + consumer.Close(); + } + } + }); + + using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) + using (var producer = + new ProducerBuilder(producerConfig) + .SetValueSerializer(new JsonSerializer(schemaRegistry, latestSchema2Unreg, + jsonSerializerConfig, jsonSchemaGeneratorSettings)) + .Build()) + { + Console.WriteLine($"PRODUCER: {producer.Name} producing on {topicName}. Enter product name, q to exit."); + + long i = 1; + string text; + while ((text = Console.ReadLine()) != "q") + { + var product = new Product + { + ProductId = i++, + ProductName = text, + Price = 9.99M, + Tags = new List { "tag1", "tag2" }, + Dimensions = new Dimensions + { + Length = 10.0M, + Width = 5.0M, + Height = 2.0M + }, + WarehouseLocation = new GeographicalLocation + { + Latitude = 37.7749M, + Longitude = -122.4194M + } + }; + try + { + await producer.ProduceAsync(topicName, new Message + { + Key = product.ProductId, + Value = product + }); + } + catch (Exception e) + { + Console.WriteLine($"error producing message: {e.Message}"); + } + Console.WriteLine($"{producer.Name} producing on {topicName}. Enter product name, q to exit."); + } + } + cts.Cancel(); + } + } +} diff --git a/examples/OAuthConsumer/Program.cs b/examples/OAuthConsumer/Program.cs index 8a651e167..b00e848db 100644 --- a/examples/OAuthConsumer/Program.cs +++ b/examples/OAuthConsumer/Program.cs @@ -1,197 +1,197 @@ -// Copyright 2022 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using System; -using System.Collections.Generic; -using System.Text; -using System.Text.RegularExpressions; -using System.Threading; -using System.Threading.Tasks; -using Newtonsoft.Json; - -/// -/// An example showing consumer -/// with a custom OAUTHBEARER token implementation. 
-/// -namespace Confluent.Kafka.Examples.OAuthConsumer -{ - /// - /// A class to store the token and related properties. - /// - class OAuthBearerToken - { - public string TokenValue { get; set; } - public long Expiration { get; set; } - public String Principal { get; set; } - public Dictionary Extensions { get; set; } - } - - public class Program - { - private const String OauthConfigRegexPattern = "^(\\s*(\\w+)\\s*=\\s*(\\w+))+\\s*$"; // 1 or more name=value pairs with optional ignored whitespace - private const String OauthConfigKeyValueRegexPattern = "(\\w+)\\s*=\\s*(\\w+)"; // Extract key=value pairs from OAuth Config - private const String PrincipalClaimNameKey = "principalClaimName"; - private const String PrincipalKey = "principal"; - private const String ScopeKey = "scope"; - - - public static async Task Main(string[] args) - { - if (args.Length != 5) - { - Console.WriteLine("Usage: .. brokerList topic group \"principal= scope=\""); - return; - } - string bootstrapServers = args[1]; - string topicName = args[2]; - string groupId = args[3]; - string oauthConf = args[4]; - - if (!Regex.IsMatch(oauthConf, OauthConfigRegexPattern)) - { - Console.WriteLine($"Invalid OAuth config {oauthConf} passed."); - Environment.Exit(1); - } - - var consumerConfig = new ConsumerConfig - { - BootstrapServers = bootstrapServers, - SecurityProtocol = SecurityProtocol.SaslPlaintext, - SaslMechanism = SaslMechanism.OAuthBearer, - SaslOauthbearerConfig = oauthConf, - GroupId = groupId, - AutoOffsetReset = AutoOffsetReset.Earliest, - EnableAutoOffsetStore = false, - }; - - // Callback to handle OAuth bearer token refresh. It creates an unsecured JWT based on the configuration defined - // in OAuth Config and sets the token on the client for use in any future authentication attempt. - // It must be invoked whenever the client requires a token (i.e. when it first starts and when the - // previously-received token is 80% of the way to its expiration time). - void OauthCallback(IClient client, string cfg) - { - try - { - var token = retrieveUnsecuredToken(cfg); - client.OAuthBearerSetToken(token.TokenValue, token.Expiration, token.Principal); - } - catch (Exception e) - { - client.OAuthBearerSetTokenFailure(e.ToString()); - } - } - - - using (var consumer = new ConsumerBuilder(consumerConfig) - .SetOAuthBearerTokenRefreshHandler(OauthCallback).Build()) - { - Console.WriteLine("\n-----------------------------------------------------------------------"); - Console.WriteLine($"Consumer {consumer.Name} consuming from topic {topicName}."); - Console.WriteLine("-----------------------------------------------------------------------"); - Console.WriteLine("Ctrl-C to quit.\n"); - - consumer.Subscribe(topicName); - CancellationTokenSource cts = new CancellationTokenSource(); - Console.CancelKeyPress += (_, e) => - { - e.Cancel = true; // prevent the process from terminating. 
- cts.Cancel(); - }; - - try - { - while (true) - { - try - { - var consumeResult = consumer.Consume(cts.Token); - - Console.WriteLine($"Received message at {consumeResult.TopicPartitionOffset}: {consumeResult.Message.Value}"); - try - { - consumer.StoreOffset(consumeResult); - } - catch (KafkaException e) - { - Console.WriteLine($"Store Offset error: {e.Error.Reason}"); - } - } - catch (ConsumeException e) - { - Console.WriteLine($"Consume error: {e.Error.Reason}"); - } - } - } - catch (OperationCanceledException) - { - Console.WriteLine("Closing consumer."); - consumer.Close(); - } - } - } - - private static string ToUnpaddedBase64(string s) - => Convert.ToBase64String(Encoding.UTF8.GetBytes(s)).TrimEnd('='); - - private static OAuthBearerToken retrieveUnsecuredToken(String oauthConfig) - { - Console.WriteLine("Refreshing the token"); - - var parsedConfig = new Dictionary(); - foreach (Match match in Regex.Matches(oauthConfig, OauthConfigKeyValueRegexPattern)) - { - parsedConfig[match.Groups[1].ToString()] = match.Groups[2].ToString(); - } - - if (!parsedConfig.ContainsKey(PrincipalKey) || !parsedConfig.ContainsKey(ScopeKey) || parsedConfig.Count > 2) - { - throw new Exception($"Invalid OAuth config {oauthConfig} passed."); - } - - var principalClaimName = parsedConfig.ContainsKey(PrincipalClaimNameKey) ? parsedConfig[PrincipalClaimNameKey] : "sub"; - var principal = parsedConfig[PrincipalKey]; - var scopeValue = parsedConfig[ScopeKey]; - - var issuedAt = DateTimeOffset.UtcNow; - var expiresAt = issuedAt.AddSeconds(5); // setting a low value to show the token refresh in action. - - var header = new - { - alg = "none", - typ = "JWT" - }; - - var payload = new Dictionary - { - {principalClaimName, principal}, - {"iat", issuedAt.ToUnixTimeSeconds()}, - {"exp", expiresAt.ToUnixTimeSeconds()}, - {ScopeKey, scopeValue} - }; - - var headerJson = JsonConvert.SerializeObject(header); - var payloadJson = JsonConvert.SerializeObject(payload); - - return new OAuthBearerToken - { - TokenValue = $"{ToUnpaddedBase64(headerJson)}.{ToUnpaddedBase64(payloadJson)}.", - Expiration = expiresAt.ToUnixTimeMilliseconds(), - Principal = principal, - }; - } - } - -} +// Copyright 2022 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +using System; +using System.Collections.Generic; +using System.Text; +using System.Text.RegularExpressions; +using System.Threading; +using System.Threading.Tasks; +using Newtonsoft.Json; + +/// +/// An example showing consumer +/// with a custom OAUTHBEARER token implementation. +/// +namespace Confluent.Kafka.Examples.OAuthConsumer +{ + /// + /// A class to store the token and related properties. 
+ /// + class OAuthBearerToken + { + public string TokenValue { get; set; } + public long Expiration { get; set; } + public String Principal { get; set; } + public Dictionary Extensions { get; set; } + } + + public class Program + { + private const String OauthConfigRegexPattern = "^(\\s*(\\w+)\\s*=\\s*(\\w+))+\\s*$"; // 1 or more name=value pairs with optional ignored whitespace + private const String OauthConfigKeyValueRegexPattern = "(\\w+)\\s*=\\s*(\\w+)"; // Extract key=value pairs from OAuth Config + private const String PrincipalClaimNameKey = "principalClaimName"; + private const String PrincipalKey = "principal"; + private const String ScopeKey = "scope"; + + + public static async Task Main(string[] args) + { + if (args.Length != 5) + { + Console.WriteLine("Usage: .. brokerList topic group \"principal= scope=\""); + return; + } + string bootstrapServers = args[1]; + string topicName = args[2]; + string groupId = args[3]; + string oauthConf = args[4]; + + if (!Regex.IsMatch(oauthConf, OauthConfigRegexPattern)) + { + Console.WriteLine($"Invalid OAuth config {oauthConf} passed."); + Environment.Exit(1); + } + + var consumerConfig = new ConsumerConfig + { + BootstrapServers = bootstrapServers, + SecurityProtocol = SecurityProtocol.SaslPlaintext, + SaslMechanism = SaslMechanism.OAuthBearer, + SaslOauthbearerConfig = oauthConf, + GroupId = groupId, + AutoOffsetReset = AutoOffsetReset.Earliest, + EnableAutoOffsetStore = false, + }; + + // Callback to handle OAuth bearer token refresh. It creates an unsecured JWT based on the configuration defined + // in OAuth Config and sets the token on the client for use in any future authentication attempt. + // It must be invoked whenever the client requires a token (i.e. when it first starts and when the + // previously-received token is 80% of the way to its expiration time). + void OauthCallback(IClient client, string cfg) + { + try + { + var token = retrieveUnsecuredToken(cfg); + client.OAuthBearerSetToken(token.TokenValue, token.Expiration, token.Principal); + } + catch (Exception e) + { + client.OAuthBearerSetTokenFailure(e.ToString()); + } + } + + + using (var consumer = new ConsumerBuilder(consumerConfig) + .SetOAuthBearerTokenRefreshHandler(OauthCallback).Build()) + { + Console.WriteLine("\n-----------------------------------------------------------------------"); + Console.WriteLine($"Consumer {consumer.Name} consuming from topic {topicName}."); + Console.WriteLine("-----------------------------------------------------------------------"); + Console.WriteLine("Ctrl-C to quit.\n"); + + consumer.Subscribe(topicName); + CancellationTokenSource cts = new CancellationTokenSource(); + Console.CancelKeyPress += (_, e) => + { + e.Cancel = true; // prevent the process from terminating. 
+ cts.Cancel(); + }; + + try + { + while (true) + { + try + { + var consumeResult = consumer.Consume(cts.Token); + + Console.WriteLine($"Received message at {consumeResult.TopicPartitionOffset}: {consumeResult.Message.Value}"); + try + { + consumer.StoreOffset(consumeResult); + } + catch (KafkaException e) + { + Console.WriteLine($"Store Offset error: {e.Error.Reason}"); + } + } + catch (ConsumeException e) + { + Console.WriteLine($"Consume error: {e.Error.Reason}"); + } + } + } + catch (OperationCanceledException) + { + Console.WriteLine("Closing consumer."); + consumer.Close(); + } + } + } + + private static string ToUnpaddedBase64(string s) + => Convert.ToBase64String(Encoding.UTF8.GetBytes(s)).TrimEnd('='); + + private static OAuthBearerToken retrieveUnsecuredToken(String oauthConfig) + { + Console.WriteLine("Refreshing the token"); + + var parsedConfig = new Dictionary(); + foreach (Match match in Regex.Matches(oauthConfig, OauthConfigKeyValueRegexPattern)) + { + parsedConfig[match.Groups[1].ToString()] = match.Groups[2].ToString(); + } + + if (!parsedConfig.ContainsKey(PrincipalKey) || !parsedConfig.ContainsKey(ScopeKey) || parsedConfig.Count > 2) + { + throw new Exception($"Invalid OAuth config {oauthConfig} passed."); + } + + var principalClaimName = parsedConfig.ContainsKey(PrincipalClaimNameKey) ? parsedConfig[PrincipalClaimNameKey] : "sub"; + var principal = parsedConfig[PrincipalKey]; + var scopeValue = parsedConfig[ScopeKey]; + + var issuedAt = DateTimeOffset.UtcNow; + var expiresAt = issuedAt.AddSeconds(5); // setting a low value to show the token refresh in action. + + var header = new + { + alg = "none", + typ = "JWT" + }; + + var payload = new Dictionary + { + {principalClaimName, principal}, + {"iat", issuedAt.ToUnixTimeSeconds()}, + {"exp", expiresAt.ToUnixTimeSeconds()}, + {ScopeKey, scopeValue} + }; + + var headerJson = JsonConvert.SerializeObject(header); + var payloadJson = JsonConvert.SerializeObject(payload); + + return new OAuthBearerToken + { + TokenValue = $"{ToUnpaddedBase64(headerJson)}.{ToUnpaddedBase64(payloadJson)}.", + Expiration = expiresAt.ToUnixTimeMilliseconds(), + Principal = principal, + }; + } + } + +} diff --git a/examples/OAuthOIDC/Program.cs b/examples/OAuthOIDC/Program.cs index bb762bae8..d4455aed0 100644 --- a/examples/OAuthOIDC/Program.cs +++ b/examples/OAuthOIDC/Program.cs @@ -1,155 +1,155 @@ -// Copyright 2022 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using System; -using System.Collections.Generic; -using System.Text; -using System.Text.RegularExpressions; -using System.Threading; -using System.Threading.Tasks; -using Confluent.Kafka; -using Confluent.Kafka.Admin; -using Newtonsoft.Json; - -/// -/// An example demonstrating how to produce a message to -/// a topic, and then reading it back again using a consumer. -/// The authentication uses the OpenID Connect method of the OAUTHBEARER SASL mechanism. 
-/// -namespace Confluent.Kafka.Examples.OAuthOIDC -{ - public class Program - { - private const String OAuthBearerClientId = ""; - private const String OAuthBearerClientSecret = ""; - private const String OAuthBearerTokenEndpointURL = ""; - private const String OAuthBearerScope = ""; - public static async Task Main(string[] args) - { - if (args.Length != 2) - { - Console.WriteLine("Usage: .. brokerList"); - return; - } - var bootstrapServers = args[1]; - var topicName = Guid.NewGuid().ToString(); - var groupId = Guid.NewGuid().ToString(); - - var commonConfig = new ClientConfig - { - BootstrapServers = bootstrapServers, - SecurityProtocol = SecurityProtocol.SaslPlaintext, - SaslMechanism = SaslMechanism.OAuthBearer, - SaslOauthbearerMethod = SaslOauthbearerMethod.Oidc, - SaslOauthbearerClientId = OAuthBearerClientId, - SaslOauthbearerClientSecret = OAuthBearerClientSecret, - SaslOauthbearerTokenEndpointUrl = OAuthBearerTokenEndpointURL, - SaslOauthbearerScope = OAuthBearerScope - }; - - var consumerConfig = new ConsumerConfig - { - BootstrapServers = bootstrapServers, - SecurityProtocol = SecurityProtocol.SaslPlaintext, - SaslMechanism = SaslMechanism.OAuthBearer, - SaslOauthbearerMethod = SaslOauthbearerMethod.Oidc, - SaslOauthbearerClientId = OAuthBearerClientId, - SaslOauthbearerClientSecret = OAuthBearerClientSecret, - SaslOauthbearerTokenEndpointUrl = OAuthBearerTokenEndpointURL, - SaslOauthbearerScope = OAuthBearerScope, - GroupId = groupId, - AutoOffsetReset = AutoOffsetReset.Earliest, - EnableAutoOffsetStore = false - }; - - try - { - createTopic(commonConfig, topicName); - } - catch (CreateTopicsException e) - { - Console.WriteLine($"An error occurred creating topic {e.Results[0].Topic}: {e.Results[0].Error.Reason}"); - Environment.Exit(1); - } - - using (var producer = new ProducerBuilder(commonConfig).Build()) - using (var consumer = new ConsumerBuilder(consumerConfig).Build()) - { - consumer.Subscribe(topicName); - - var cancelled = false; - CancellationTokenSource cts = new CancellationTokenSource(); - - Console.CancelKeyPress += (_, e) => - { - e.Cancel = true; // prevent the process from terminating. - cancelled = true; - cts.Cancel(); - }; - - try - { - while (!cancelled) - { - var msg = Guid.NewGuid().ToString(); - try - { - var deliveryReport = await producer.ProduceAsync(topicName, new Message { Value = msg }); - Console.WriteLine($"Produced message to {deliveryReport.TopicPartitionOffset}, {msg}"); - } - catch (ProduceException e) - { - Console.WriteLine($"failed to deliver message: {e.Message} [{e.Error.Code}]"); - } - - try - { - var consumeResult = consumer.Consume(cts.Token); - Console.WriteLine($"Received message at {consumeResult.TopicPartitionOffset}: {consumeResult.Message.Value}"); - try - { - consumer.StoreOffset(consumeResult); - } - catch (KafkaException e) - { - Console.WriteLine($"Store Offset error: {e.Error.Reason}"); - } - } - catch (ConsumeException e) - { - Console.WriteLine($"Consume error: {e.Error.Reason}"); - } - } - } - catch (OperationCanceledException) - { - Console.WriteLine("Closing consumer."); - consumer.Close(); - } - } - } - - private static void createTopic(ClientConfig config, String topicName) - { - using (var adminClient = new AdminClientBuilder(config).Build()) - { - adminClient.CreateTopicsAsync(new TopicSpecification[] { - new TopicSpecification { Name = topicName, ReplicationFactor = 3, NumPartitions = 1 } }).Wait(); ; - } - } - } - -} +// Copyright 2022 Confluent Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +using System; +using System.Collections.Generic; +using System.Text; +using System.Text.RegularExpressions; +using System.Threading; +using System.Threading.Tasks; +using Confluent.Kafka; +using Confluent.Kafka.Admin; +using Newtonsoft.Json; + +/// +/// An example demonstrating how to produce a message to +/// a topic, and then reading it back again using a consumer. +/// The authentication uses the OpenID Connect method of the OAUTHBEARER SASL mechanism. +/// +namespace Confluent.Kafka.Examples.OAuthOIDC +{ + public class Program + { + private const String OAuthBearerClientId = ""; + private const String OAuthBearerClientSecret = ""; + private const String OAuthBearerTokenEndpointURL = ""; + private const String OAuthBearerScope = ""; + public static async Task Main(string[] args) + { + if (args.Length != 2) + { + Console.WriteLine("Usage: .. brokerList"); + return; + } + var bootstrapServers = args[1]; + var topicName = Guid.NewGuid().ToString(); + var groupId = Guid.NewGuid().ToString(); + + var commonConfig = new ClientConfig + { + BootstrapServers = bootstrapServers, + SecurityProtocol = SecurityProtocol.SaslPlaintext, + SaslMechanism = SaslMechanism.OAuthBearer, + SaslOauthbearerMethod = SaslOauthbearerMethod.Oidc, + SaslOauthbearerClientId = OAuthBearerClientId, + SaslOauthbearerClientSecret = OAuthBearerClientSecret, + SaslOauthbearerTokenEndpointUrl = OAuthBearerTokenEndpointURL, + SaslOauthbearerScope = OAuthBearerScope + }; + + var consumerConfig = new ConsumerConfig + { + BootstrapServers = bootstrapServers, + SecurityProtocol = SecurityProtocol.SaslPlaintext, + SaslMechanism = SaslMechanism.OAuthBearer, + SaslOauthbearerMethod = SaslOauthbearerMethod.Oidc, + SaslOauthbearerClientId = OAuthBearerClientId, + SaslOauthbearerClientSecret = OAuthBearerClientSecret, + SaslOauthbearerTokenEndpointUrl = OAuthBearerTokenEndpointURL, + SaslOauthbearerScope = OAuthBearerScope, + GroupId = groupId, + AutoOffsetReset = AutoOffsetReset.Earliest, + EnableAutoOffsetStore = false + }; + + try + { + createTopic(commonConfig, topicName); + } + catch (CreateTopicsException e) + { + Console.WriteLine($"An error occurred creating topic {e.Results[0].Topic}: {e.Results[0].Error.Reason}"); + Environment.Exit(1); + } + + using (var producer = new ProducerBuilder(commonConfig).Build()) + using (var consumer = new ConsumerBuilder(consumerConfig).Build()) + { + consumer.Subscribe(topicName); + + var cancelled = false; + CancellationTokenSource cts = new CancellationTokenSource(); + + Console.CancelKeyPress += (_, e) => + { + e.Cancel = true; // prevent the process from terminating. 
+ cancelled = true; + cts.Cancel(); + }; + + try + { + while (!cancelled) + { + var msg = Guid.NewGuid().ToString(); + try + { + var deliveryReport = await producer.ProduceAsync(topicName, new Message { Value = msg }); + Console.WriteLine($"Produced message to {deliveryReport.TopicPartitionOffset}, {msg}"); + } + catch (ProduceException e) + { + Console.WriteLine($"failed to deliver message: {e.Message} [{e.Error.Code}]"); + } + + try + { + var consumeResult = consumer.Consume(cts.Token); + Console.WriteLine($"Received message at {consumeResult.TopicPartitionOffset}: {consumeResult.Message.Value}"); + try + { + consumer.StoreOffset(consumeResult); + } + catch (KafkaException e) + { + Console.WriteLine($"Store Offset error: {e.Error.Reason}"); + } + } + catch (ConsumeException e) + { + Console.WriteLine($"Consume error: {e.Error.Reason}"); + } + } + } + catch (OperationCanceledException) + { + Console.WriteLine("Closing consumer."); + consumer.Close(); + } + } + } + + private static void createTopic(ClientConfig config, String topicName) + { + using (var adminClient = new AdminClientBuilder(config).Build()) + { + adminClient.CreateTopicsAsync(new TopicSpecification[] { + new TopicSpecification { Name = topicName, ReplicationFactor = 3, NumPartitions = 1 } }).Wait(); ; + } + } + } + +} diff --git a/examples/OAuthProducer/Program.cs b/examples/OAuthProducer/Program.cs index 48759b0fc..23bbdb184 100644 --- a/examples/OAuthProducer/Program.cs +++ b/examples/OAuthProducer/Program.cs @@ -1,179 +1,179 @@ -// Copyright 2022 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using System; -using System.Collections.Generic; -using System.Text; -using System.Text.RegularExpressions; -using System.Threading; -using System.Threading.Tasks; -using Newtonsoft.Json; - -/// -/// An example showing producer -/// with a custom OAUTHBEARER token implementation. -/// -namespace Confluent.Kafka.Examples.OAuthProducer -{ - /// - /// A class to store the token and related properties. - /// - class OAuthBearerToken - { - public string TokenValue { get; set; } - public long Expiration { get; set; } - public String Principal { get; set; } - public Dictionary Extensions { get; set; } - } - - public class Program - { - private const String OauthConfigRegexPattern = "^(\\s*(\\w+)\\s*=\\s*(\\w+))+\\s*$"; // 1 or more name=value pairs with optional ignored whitespace - private const String OauthConfigKeyValueRegexPattern = "(\\w+)\\s*=\\s*(\\w+)"; // Extract key=value pairs from OAuth Config - private const String PrincipalClaimNameKey = "principalClaimName"; - private const String PrincipalKey = "principal"; - private const String ScopeKey = "scope"; - - - public static async Task Main(string[] args) - { - if (args.Length != 4) - { - Console.WriteLine("Usage: .. 
brokerList topic \"principal= scope=\""); - return; - } - string bootstrapServers = args[1]; - string topicName = args[2]; - string oauthConf = args[3]; - - if (!Regex.IsMatch(oauthConf, OauthConfigRegexPattern)) - { - Console.WriteLine("Invalid OAuth config passed."); - Environment.Exit(1); - } - - var producerConfig = new ProducerConfig - { - BootstrapServers = bootstrapServers, - SecurityProtocol = SecurityProtocol.SaslPlaintext, - SaslMechanism = SaslMechanism.OAuthBearer, - SaslOauthbearerConfig = oauthConf, - }; - - // Callback to handle OAuth bearer token refresh. It creates an unsecured JWT based on the configuration defined - // in OAuth Config and sets the token on the client for use in any future authentication attempt. - // It must be invoked whenever the client requires a token (i.e. when it first starts and when the - // previously-received token is 80% of the way to its expiration time). - void OauthCallback(IClient client, string cfg) - { - try - { - var token = retrieveUnsecuredToken(cfg); - client.OAuthBearerSetToken(token.TokenValue, token.Expiration, token.Principal); - } - catch (Exception e) - { - client.OAuthBearerSetTokenFailure(e.ToString()); - } - } - - - using (var producer = new ProducerBuilder(producerConfig) - .SetOAuthBearerTokenRefreshHandler(OauthCallback).Build()) - { - Console.WriteLine("\n-----------------------------------------------------------------------"); - Console.WriteLine($"Producer {producer.Name} producing on topic {topicName}."); - Console.WriteLine("-----------------------------------------------------------------------"); - Console.WriteLine("Ctrl-C to quit.\n"); - - var cancelled = false; - var msgCnt = 1; - Console.CancelKeyPress += (_, e) => - { - e.Cancel = true; // prevent the process from terminating. - cancelled = true; - }; - - while (!cancelled) - { - var msg = String.Format("Producer example, message #{0}", msgCnt++); - - try - { - var deliveryReport = await producer.ProduceAsync(topicName, new Message { Value = msg }); - Console.WriteLine($"Produced message to {deliveryReport.TopicPartitionOffset}, {msg}"); - } - catch (ProduceException e) - { - Console.WriteLine($"failed to deliver message: {e.Message} [{e.Error.Code}]"); - } - Thread.Sleep(1000); // sleep one second - } - } - } - - private static string ToUnpaddedBase64(string s) - => Convert.ToBase64String(Encoding.UTF8.GetBytes(s)).TrimEnd('='); - - private static OAuthBearerToken retrieveUnsecuredToken(String oauthConfig) - { - Console.WriteLine("Refreshing the token"); - - var parsedConfig = new Dictionary(); - foreach (Match match in Regex.Matches(oauthConfig, OauthConfigKeyValueRegexPattern)) - { - parsedConfig[match.Groups[1].ToString()] = match.Groups[2].ToString(); - } - - if (!parsedConfig.ContainsKey(PrincipalKey) || !parsedConfig.ContainsKey(ScopeKey) || parsedConfig.Count > 2) - { - throw new Exception($"Invalid OAuth config {oauthConfig} passed."); - } - - var principalClaimName = parsedConfig.ContainsKey(PrincipalClaimNameKey) ? parsedConfig[PrincipalClaimNameKey] : "sub"; - var principal = parsedConfig[PrincipalKey]; - var scopeValue = parsedConfig[ScopeKey]; - - var issuedAt = DateTimeOffset.UtcNow; - var expiresAt = issuedAt.AddSeconds(5); // setting a low value to show the token refresh in action. 
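// Note: because the header constructed below declares alg "none", the token assembled at the end
// of this method has an empty signature segment -- its shape is
// "<base64(headerJson)>.<base64(payloadJson)>." with a trailing '.', and that string is what
// OAuthBearerSetToken receives as the token value.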
- - var header = new - { - alg = "none", - typ = "JWT" - }; - - var payload = new Dictionary - { - {principalClaimName, principal}, - {"iat", issuedAt.ToUnixTimeSeconds()}, - {"exp", expiresAt.ToUnixTimeSeconds()}, - {ScopeKey, scopeValue} - }; - - var headerJson = JsonConvert.SerializeObject(header); - var payloadJson = JsonConvert.SerializeObject(payload); - - return new OAuthBearerToken - { - TokenValue = $"{ToUnpaddedBase64(headerJson)}.{ToUnpaddedBase64(payloadJson)}.", - Expiration = expiresAt.ToUnixTimeMilliseconds(), - Principal = principal, - Extensions = new Dictionary() - }; - } - } -} +// Copyright 2022 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +using System; +using System.Collections.Generic; +using System.Text; +using System.Text.RegularExpressions; +using System.Threading; +using System.Threading.Tasks; +using Newtonsoft.Json; + +/// +/// An example showing producer +/// with a custom OAUTHBEARER token implementation. +/// +namespace Confluent.Kafka.Examples.OAuthProducer +{ + /// + /// A class to store the token and related properties. + /// + class OAuthBearerToken + { + public string TokenValue { get; set; } + public long Expiration { get; set; } + public String Principal { get; set; } + public Dictionary Extensions { get; set; } + } + + public class Program + { + private const String OauthConfigRegexPattern = "^(\\s*(\\w+)\\s*=\\s*(\\w+))+\\s*$"; // 1 or more name=value pairs with optional ignored whitespace + private const String OauthConfigKeyValueRegexPattern = "(\\w+)\\s*=\\s*(\\w+)"; // Extract key=value pairs from OAuth Config + private const String PrincipalClaimNameKey = "principalClaimName"; + private const String PrincipalKey = "principal"; + private const String ScopeKey = "scope"; + + + public static async Task Main(string[] args) + { + if (args.Length != 4) + { + Console.WriteLine("Usage: .. brokerList topic \"principal= scope=\""); + return; + } + string bootstrapServers = args[1]; + string topicName = args[2]; + string oauthConf = args[3]; + + if (!Regex.IsMatch(oauthConf, OauthConfigRegexPattern)) + { + Console.WriteLine("Invalid OAuth config passed."); + Environment.Exit(1); + } + + var producerConfig = new ProducerConfig + { + BootstrapServers = bootstrapServers, + SecurityProtocol = SecurityProtocol.SaslPlaintext, + SaslMechanism = SaslMechanism.OAuthBearer, + SaslOauthbearerConfig = oauthConf, + }; + + // Callback to handle OAuth bearer token refresh. It creates an unsecured JWT based on the configuration defined + // in OAuth Config and sets the token on the client for use in any future authentication attempt. + // It must be invoked whenever the client requires a token (i.e. when it first starts and when the + // previously-received token is 80% of the way to its expiration time). 
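// For illustration (hypothetical values): a configuration string such as
//     "principal=admin scope=test"
// satisfies OauthConfigRegexPattern, and retrieveUnsecuredToken splits it with
// OauthConfigKeyValueRegexPattern into principal=admin and scope=test, which then become
// claims of the unsecured JWT ("sub" by default, plus "scope").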
+ void OauthCallback(IClient client, string cfg) + { + try + { + var token = retrieveUnsecuredToken(cfg); + client.OAuthBearerSetToken(token.TokenValue, token.Expiration, token.Principal); + } + catch (Exception e) + { + client.OAuthBearerSetTokenFailure(e.ToString()); + } + } + + + using (var producer = new ProducerBuilder(producerConfig) + .SetOAuthBearerTokenRefreshHandler(OauthCallback).Build()) + { + Console.WriteLine("\n-----------------------------------------------------------------------"); + Console.WriteLine($"Producer {producer.Name} producing on topic {topicName}."); + Console.WriteLine("-----------------------------------------------------------------------"); + Console.WriteLine("Ctrl-C to quit.\n"); + + var cancelled = false; + var msgCnt = 1; + Console.CancelKeyPress += (_, e) => + { + e.Cancel = true; // prevent the process from terminating. + cancelled = true; + }; + + while (!cancelled) + { + var msg = String.Format("Producer example, message #{0}", msgCnt++); + + try + { + var deliveryReport = await producer.ProduceAsync(topicName, new Message { Value = msg }); + Console.WriteLine($"Produced message to {deliveryReport.TopicPartitionOffset}, {msg}"); + } + catch (ProduceException e) + { + Console.WriteLine($"failed to deliver message: {e.Message} [{e.Error.Code}]"); + } + Thread.Sleep(1000); // sleep one second + } + } + } + + private static string ToUnpaddedBase64(string s) + => Convert.ToBase64String(Encoding.UTF8.GetBytes(s)).TrimEnd('='); + + private static OAuthBearerToken retrieveUnsecuredToken(String oauthConfig) + { + Console.WriteLine("Refreshing the token"); + + var parsedConfig = new Dictionary(); + foreach (Match match in Regex.Matches(oauthConfig, OauthConfigKeyValueRegexPattern)) + { + parsedConfig[match.Groups[1].ToString()] = match.Groups[2].ToString(); + } + + if (!parsedConfig.ContainsKey(PrincipalKey) || !parsedConfig.ContainsKey(ScopeKey) || parsedConfig.Count > 2) + { + throw new Exception($"Invalid OAuth config {oauthConfig} passed."); + } + + var principalClaimName = parsedConfig.ContainsKey(PrincipalClaimNameKey) ? parsedConfig[PrincipalClaimNameKey] : "sub"; + var principal = parsedConfig[PrincipalKey]; + var scopeValue = parsedConfig[ScopeKey]; + + var issuedAt = DateTimeOffset.UtcNow; + var expiresAt = issuedAt.AddSeconds(5); // setting a low value to show the token refresh in action. + + var header = new + { + alg = "none", + typ = "JWT" + }; + + var payload = new Dictionary + { + {principalClaimName, principal}, + {"iat", issuedAt.ToUnixTimeSeconds()}, + {"exp", expiresAt.ToUnixTimeSeconds()}, + {ScopeKey, scopeValue} + }; + + var headerJson = JsonConvert.SerializeObject(header); + var payloadJson = JsonConvert.SerializeObject(payload); + + return new OAuthBearerToken + { + TokenValue = $"{ToUnpaddedBase64(headerJson)}.{ToUnpaddedBase64(payloadJson)}.", + Expiration = expiresAt.ToUnixTimeMilliseconds(), + Principal = principal, + Extensions = new Dictionary() + }; + } + } +} diff --git a/examples/Producer/Program.cs b/examples/Producer/Program.cs index 682e9e2ce..e5171f491 100644 --- a/examples/Producer/Program.cs +++ b/examples/Producer/Program.cs @@ -53,7 +53,8 @@ public static async Task Main(string[] args) Console.WriteLine("Ctrl-C to quit.\n"); var cancelled = false; - Console.CancelKeyPress += (_, e) => { + Console.CancelKeyPress += (_, e) => + { e.Cancel = true; // prevent the process from terminating. 
cancelled = true; }; diff --git a/examples/Protobuf/Program.cs b/examples/Protobuf/Program.cs index ebceef50b..9996601b7 100644 --- a/examples/Protobuf/Program.cs +++ b/examples/Protobuf/Program.cs @@ -1,124 +1,124 @@ -// Copyright 2018-2020 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using Confluent.Kafka.SyncOverAsync; -using Confluent.SchemaRegistry; -using Confluent.SchemaRegistry.Serdes; -using System; -using System.Threading; -using System.Threading.Tasks; - - -/// -/// An example of working with protobuf serialized data and -/// Confluent Schema Registry (v5.5 or later required for -/// Protobuf schema support). -/// -namespace Confluent.Kafka.Examples.Protobuf -{ - class Program - { - static async Task Main(string[] args) - { - if (args.Length != 3) - { - Console.WriteLine("Usage: .. bootstrapServers schemaRegistryUrl topicName"); - return; - } - - string bootstrapServers = args[0]; - string schemaRegistryUrl = args[1]; - string topicName = args[2]; - - var producerConfig = new ProducerConfig - { - BootstrapServers = bootstrapServers - }; - - var schemaRegistryConfig = new SchemaRegistryConfig - { - // Note: you can specify more than one schema registry url using the - // schema.registry.url property for redundancy (comma separated list). - // The property name is not plural to follow the convention set by - // the Java implementation. - Url = schemaRegistryUrl, - }; - - var consumerConfig = new ConsumerConfig - { - BootstrapServers = bootstrapServers, - GroupId = "protobuf-example-consumer-group" - }; - - CancellationTokenSource cts = new CancellationTokenSource(); - var consumeTask = Task.Run(() => - { - using (var consumer = - new ConsumerBuilder(consumerConfig) - .SetValueDeserializer(new ProtobufDeserializer().AsSyncOverAsync()) - .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}")) - .Build()) - { - consumer.Subscribe(topicName); - - try - { - while (true) - { - try - { - var consumeResult = consumer.Consume(cts.Token); - var user = consumeResult.Message.Value; - Console.WriteLine($"key: {consumeResult.Message.Key} user name: {user.Name}, favorite number: {user.FavoriteNumber}, favorite color: {user.FavoriteColor}"); - } - catch (ConsumeException e) - { - Console.WriteLine($"Consume error: {e.Error.Reason}"); - } - } - } - catch (OperationCanceledException) - { - consumer.Close(); - } - } - }); - - using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) - using (var producer = - new ProducerBuilder(producerConfig) - .SetValueSerializer(new ProtobufSerializer(schemaRegistry)) - .Build()) - { - Console.WriteLine($"{producer.Name} producing on {topicName}. 
Enter user names, q to exit."); - - long i = 1; - string text; - while ((text = Console.ReadLine()) != "q") - { - User user = new User { Name = text, FavoriteColor = "green", FavoriteNumber = i++ }; - await producer - .ProduceAsync(topicName, new Message { Key = text, Value = user }) - .ContinueWith(task => task.IsFaulted - ? $"error producing message: {task.Exception.Message}" - : $"produced to: {task.Result.TopicPartitionOffset}"); - } - } - - cts.Cancel(); - } - } -} +// Copyright 2018-2020 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +using Confluent.Kafka.SyncOverAsync; +using Confluent.SchemaRegistry; +using Confluent.SchemaRegistry.Serdes; +using System; +using System.Threading; +using System.Threading.Tasks; + + +/// +/// An example of working with protobuf serialized data and +/// Confluent Schema Registry (v5.5 or later required for +/// Protobuf schema support). +/// +namespace Confluent.Kafka.Examples.Protobuf +{ + class Program + { + static async Task Main(string[] args) + { + if (args.Length != 3) + { + Console.WriteLine("Usage: .. bootstrapServers schemaRegistryUrl topicName"); + return; + } + + string bootstrapServers = args[0]; + string schemaRegistryUrl = args[1]; + string topicName = args[2]; + + var producerConfig = new ProducerConfig + { + BootstrapServers = bootstrapServers + }; + + var schemaRegistryConfig = new SchemaRegistryConfig + { + // Note: you can specify more than one schema registry url using the + // schema.registry.url property for redundancy (comma separated list). + // The property name is not plural to follow the convention set by + // the Java implementation. + Url = schemaRegistryUrl, + }; + + var consumerConfig = new ConsumerConfig + { + BootstrapServers = bootstrapServers, + GroupId = "protobuf-example-consumer-group" + }; + + CancellationTokenSource cts = new CancellationTokenSource(); + var consumeTask = Task.Run(() => + { + using (var consumer = + new ConsumerBuilder(consumerConfig) + .SetValueDeserializer(new ProtobufDeserializer().AsSyncOverAsync()) + .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}")) + .Build()) + { + consumer.Subscribe(topicName); + + try + { + while (true) + { + try + { + var consumeResult = consumer.Consume(cts.Token); + var user = consumeResult.Message.Value; + Console.WriteLine($"key: {consumeResult.Message.Key} user name: {user.Name}, favorite number: {user.FavoriteNumber}, favorite color: {user.FavoriteColor}"); + } + catch (ConsumeException e) + { + Console.WriteLine($"Consume error: {e.Error.Reason}"); + } + } + } + catch (OperationCanceledException) + { + consumer.Close(); + } + } + }); + + using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) + using (var producer = + new ProducerBuilder(producerConfig) + .SetValueSerializer(new ProtobufSerializer(schemaRegistry)) + .Build()) + { + Console.WriteLine($"{producer.Name} producing on {topicName}. 
Enter user names, q to exit."); + + long i = 1; + string text; + while ((text = Console.ReadLine()) != "q") + { + User user = new User { Name = text, FavoriteColor = "green", FavoriteNumber = i++ }; + await producer + .ProduceAsync(topicName, new Message { Key = text, Value = user }) + .ContinueWith(task => task.IsFaulted + ? $"error producing message: {task.Exception.Message}" + : $"produced to: {task.Result.TopicPartitionOffset}"); + } + } + + cts.Cancel(); + } + } +} diff --git a/examples/ProtobufEncryption/Program.cs b/examples/ProtobufEncryption/Program.cs index e9c34df10..1772f3ded 100644 --- a/examples/ProtobufEncryption/Program.cs +++ b/examples/ProtobufEncryption/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2018-2024 Confluent Inc. +// Copyright 2018-2024 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -69,7 +69,7 @@ message User { int64 FavoriteNumber = 2; string FavoriteColor = 3; }"; - + var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers @@ -159,7 +159,7 @@ message User { .Build()) { await schemaRegistry.RegisterSchemaAsync(subjectName, schema, true); - + Console.WriteLine($"{producer.Name} producing on {topicName}. Enter user names, q to exit."); long i = 1; diff --git a/src/ConfigGen/Program.cs b/src/ConfigGen/Program.cs index 66b3e853f..5bdbd7d53 100644 --- a/src/ConfigGen/Program.cs +++ b/src/ConfigGen/Program.cs @@ -1,751 +1,753 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Net.Http; -using System.Text.RegularExpressions; -using System.Threading.Tasks; - - -namespace ConfigGen -{ - internal class MappingConfiguration - { - /// - /// librdkafka _RK_C_S2I properties are automatically interpreted as enums, however - /// _RK_C_STR properties with discrete set of allowed values are not. Enum values for - /// these property types are specified here. - /// - /// - /// sasl.mechanisms is an awkward case because the values contain '-' characters (and - /// there are other values that contain the '_' character, so can't 1:1 map with this). - /// This type is defined by hand later. - /// - internal static Dictionary> AdditionalEnums => new Dictionary> - { - { "partition.assignment.strategy", new List { "range", "roundrobin", "cooperative-sticky" } }, - { "partitioner", new List { "random", "consistent", "consistent_random", "murmur2", "murmur2_random" } } - }; - - /// - /// A function that filters out properties from the librdkafka list that should - /// not be automatically extracted. - /// - internal static List RemoveLegacyOrNotRelevant(List props) - => props.Where(p => { - // handled as a special case. 
- if (p.Name == "sasl.mechanisms") { return false; } - if (p.Name == "sasl.mechanism") { return false; } - if (p.Name == "acks") { return false; } - if (p.Name == "request.required.acks") { return false; } - // legacy - if (p.Name == "consume.callback.max.messages") { return false; } - if (p.Name == "offset.store.method") { return false; } - if (p.Name == "offset.store.path") { return false; } - if (p.Name == "offset.store.sync.interval.ms") { return false; } - if (p.Name == "builtin.features") { return false; } - if (p.Name == "produce.offset.report") { return false; } - if (p.Name == "delivery.report.only.error") { return false; } - if (p.Name == "topic.metadata.refresh.fast.cnt") { return false; } - if (p.Name == "reconnect.backoff.jitter.ms") { return false; } - if (p.Name == "socket.blocking.max.ms") { return false; } - if (p.Name == "auto.commit.interval.ms" && !p.IsGlobal) { return false; } - if (p.Name == "enable.auto.commit" && !p.IsGlobal) { return false; } - if (p.Name == "auto.commit.enable" && !p.IsGlobal) { return false; } - if (p.Name == "queuing.strategy") { return false; } - // other - if (p.Name.Contains("_")) { return false; } - return true; - }).ToList(); - - /// - /// A dictionary of synonym config properties. The key is included in the config - /// classes, the value is not. - /// - internal static Dictionary PreferredNames => - new Dictionary - { - { "bootstrap.servers", "metadata.broker.list" }, - { "max.in.flight", "max.in.flight.requests.per.connection" }, - { "max.partition.fetch.bytes", "fetch.message.max.bytes" }, - { "linger.ms", "queue.buffering.max.ms" }, - { "message.send.max.retries", "retries" }, - { "compression.type", "compression.codec" } - }; - - /// - /// SaslMechanism definition - /// - internal static string SaslMechanismEnumString => -@" - /// - /// SaslMechanism enum values - /// - public enum SaslMechanism - { - /// - /// GSSAPI - /// - Gssapi, - - /// - /// PLAIN - /// - Plain, - - /// - /// SCRAM-SHA-256 - /// - ScramSha256, - - /// - /// SCRAM-SHA-512 - /// - ScramSha512, - - /// - /// OAUTHBEARER - /// - OAuthBearer - } -"; - - /// - /// get/set for SaslMechanism. - /// - internal static string SaslMechanismGetSetString => -@" - /// - /// SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the name, you may not configure more than one mechanism. - /// - public SaslMechanism? 
SaslMechanism - { - get - { - var r = Get(""sasl.mechanism""); - if (r == null) { return null; } - if (r == ""GSSAPI"") { return Confluent.Kafka.SaslMechanism.Gssapi; } - if (r == ""PLAIN"") { return Confluent.Kafka.SaslMechanism.Plain; } - if (r == ""SCRAM-SHA-256"") { return Confluent.Kafka.SaslMechanism.ScramSha256; } - if (r == ""SCRAM-SHA-512"") { return Confluent.Kafka.SaslMechanism.ScramSha512; } - if (r == ""OAUTHBEARER"") { return Confluent.Kafka.SaslMechanism.OAuthBearer; } - throw new ArgumentException($""Unknown sasl.mechanism value {r}""); - } - set - { - if (value == null) { this.properties.Remove(""sasl.mechanism""); } - else if (value == Confluent.Kafka.SaslMechanism.Gssapi) { this.properties[""sasl.mechanism""] = ""GSSAPI""; } - else if (value == Confluent.Kafka.SaslMechanism.Plain) { this.properties[""sasl.mechanism""] = ""PLAIN""; } - else if (value == Confluent.Kafka.SaslMechanism.ScramSha256) { this.properties[""sasl.mechanism""] = ""SCRAM-SHA-256""; } - else if (value == Confluent.Kafka.SaslMechanism.ScramSha512) { this.properties[""sasl.mechanism""] = ""SCRAM-SHA-512""; } - else if (value == Confluent.Kafka.SaslMechanism.OAuthBearer) { this.properties[""sasl.mechanism""] = ""OAUTHBEARER""; } - else throw new ArgumentException($""Unknown sasl.mechanism value {value}""); - } - } - -"; - - - /// - /// SaslMechanism definition - /// - internal static string AcksEnumString => -@" - /// - /// Acks enum values - /// - public enum Acks : int - { - /// - /// None - /// - None = 0, - - /// - /// Leader - /// - Leader = 1, - - /// - /// All - /// - All = -1 - } -"; - - /// - /// get/set for Acks. - /// - internal static string AcksGetSetString => -@" - /// - /// This field indicates the number of acknowledgements the leader broker must receive from ISR brokers - /// before responding to the request: Zero=Broker does not send any response/ack to client, One=The - /// leader will write the record to its local log but will respond without awaiting full acknowledgement - /// from all followers. All=Broker will block until message is committed by all in sync replicas (ISRs). - /// If there are less than min.insync.replicas (broker configuration) in the ISR set the produce request - /// will fail. - /// - public Acks? Acks - { - get - { - var r = Get(""acks""); - if (r == null) { return null; } - if (r == ""0"") { return Confluent.Kafka.Acks.None; } - if (r == ""1"") { return Confluent.Kafka.Acks.Leader; } - if (r == ""-1"" || r == ""all"") { return Confluent.Kafka.Acks.All; } - return (Acks)(int.Parse(r)); - } - set - { - if (value == null) { this.properties.Remove(""acks""); } - else if (value == Confluent.Kafka.Acks.None) { this.properties[""acks""] = ""0""; } - else if (value == Confluent.Kafka.Acks.Leader) { this.properties[""acks""] = ""1""; } - else if (value == Confluent.Kafka.Acks.All) { this.properties[""acks""] = ""-1""; } - else { this.properties[""acks""] = ((int)value.Value).ToString(); } - } - } - -"; - - } - - - class PropertySpecification : IComparable - { - public PropertySpecification() {} - - public PropertySpecification(PropertySpecification other) - { - IsGlobal = other.IsGlobal; - Name = other.Name; - CPorA = other.CPorA; - Range = other.Range; - Importance = other.Importance; - Default = other.Default; - Description = other.Description; - Type = other.Type; - AliasFor = other.AliasFor; - } - - public bool IsGlobal { get; set; } - public string Name { get; set; } - public string CPorA { get; set; } // Consumer, Producer or All. 
- public string Range { get; set; } - public string Importance { get; set; } - public string Default { get; set; } - public string Description { get; set; } - public string Type { get; set; } - public string AliasFor { get; set; } - - public int CompareTo(object obj) - => Name.CompareTo(((PropertySpecification)obj).Name); - } - - class Program - { - static string parseType(string type) - { - if (type == "string") { return "string"; } - if (type == "integer") { return "int"; } - if (type == "boolean") { return "bool"; } - if (type == "enum value") { return "enum"; } - if (type == "CSV flags") { return "string"; } - if (type == "pattern list") { return "string"; } - if (type == "float") { return "double"; } - if (type == "pointer") { return "pointer"; } - if (type == "") { return "pointer"; } - if (type == "see dedicated API") { return "pointer"; } - throw new Exception($"unknown type '{type}'"); - } - - static string createFileHeader(string branch) - { - return -@"// *** Auto-generated from librdkafka " + branch + @" *** - do not modify manually. -// -// Copyright 2018-2022 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the 'License'); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an 'AS IS' BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; - - -namespace Confluent.Kafka -{ -"; - } - - static string createFileFooter() - { - return -@"} -"; - } - - static string ConfigNameToDotnetName(string configName) - => Regex.Replace( - string.Concat( - configName.Split('.').Select(p => char.ToUpper(p[0]) + p.Substring(1))), - "_[a-z]", - m => "_" + m.Value.Substring(1).ToUpper()); - - private static Dictionary ConfigValueToEnumNameSubstitutes = new Dictionary - { - { "sasl_plaintext", "SaslPlaintext" }, - { "sasl_ssl", "SaslSsl" }, - { "consistent_random", "ConsistentRandom" }, - { "murmur2_random", "Murmur2Random"}, - { "roundrobin", "RoundRobin" }, - { "cooperative-sticky", "CooperativeSticky"}, - { "read_uncommitted", "ReadUncommitted" }, - { "read_committed", "ReadCommitted" }, - { "use_all_dns_ips", "UseAllDnsIps"}, - { "resolve_canonical_bootstrap_servers_only", "ResolveCanonicalBootstrapServersOnly"} - }; - - static string EnumNameToDotnetName(string enumName) - { - if (ConfigValueToEnumNameSubstitutes.TryGetValue(enumName, out string substitute)) - { - return substitute; - } - - var result = char.ToUpper(enumName[0]) + enumName.Substring(1); - if (result.Contains('_')) - { - Console.WriteLine($"warning: enum value contains underscore (is not consistent with .net naming standards): {enumName}"); - } - - return result; - } - - static string createProperties(IEnumerable props) - { - var codeText = ""; - foreach (var prop in props) - { - if (prop.Type == "pointer") { continue; } - var type = (prop.Type == "enum" || MappingConfiguration.AdditionalEnums.Keys.Contains(prop.Name)) ? ConfigNameToDotnetName(prop.Name) : prop.Type; - var nullableType = type == "string" ? 
"string" : type + "?"; - - codeText += $" /// \n"; - codeText += $" /// {prop.Description}\n"; - codeText += $" ///\n"; - codeText += $" /// default: {(prop.Default == "" ? "''" : prop.Default)}\n"; - codeText += $" /// importance: {prop.Importance}\n"; - codeText += $" /// \n"; - codeText += $" public {nullableType} {ConfigNameToDotnetName(prop.Name)} {{ get {{ return "; - switch (type) - { - case "string": - codeText += $"Get(\"{prop.Name}\")"; - break; - case "int": - codeText += $"GetInt(\"{prop.Name}\")"; - break; - case "bool": - codeText += $"GetBool(\"{prop.Name}\")"; - break; - case "double": - codeText += $"GetDouble(\"{prop.Name}\")"; - break; - default: - codeText += $"({nullableType})GetEnum(typeof({type}), \"{prop.Name}\")"; - break; - } - codeText += $"; }} set {{ this.SetObject(\"{prop.Name}\", value); }} }}\n"; - codeText += $"\n"; - } - return codeText; - } - - static string createClassFooter() - { - return -@" } - -"; - } - - static string createEnums(List props) - { - var codeText = ""; - for (int j = 0; j < props.Count(); ++j) - { - var prop = props[j]; - List vs = null; - if (prop.Type == "string") - { - vs = MappingConfiguration.AdditionalEnums[prop.Name]; - } - else - { - vs = prop.Range.Split(',').Select(v => v.Trim()).ToList(); - if (prop.Name == "auto.offset.reset") - { - // Only expose the options allowed by the Java client. - vs = new List { "Latest", "Earliest", "Error" }; - } - } - if (j != 0) { codeText += "\n"; } - codeText += $" /// \n"; - codeText += $" /// {ConfigNameToDotnetName(prop.Name)} enum values\n"; - codeText += $" /// \n"; - codeText += $" public enum {ConfigNameToDotnetName(prop.Name)}\n"; - codeText += $" {{\n"; - for (int i = 0; i < vs.Count; ++i) - { - var v = vs[i]; - var nm = EnumNameToDotnetName(v); - codeText += $" /// \n"; - codeText += $" /// {nm}\n"; - codeText += $" /// \n"; - codeText += $" {nm}{(i == vs.Count - 1 ? "" : ",\n")}\n"; - } - codeText += $" }}\n"; - } - return codeText; - } - - static string createClassHeader(string name, string docs, bool derive) - { - var codeText = "\n"; - codeText += $" /// \n"; - codeText += $" /// {docs}\n"; - codeText += $" /// \n"; - codeText += $" public class {name}{(derive ? " : ClientConfig" : " : Config")}\n"; - codeText += $" {{\n"; - return codeText; - } - - static string createClassConstructors(string name) - { - var codeText = $@" - /// - /// Initialize a new empty instance. - /// - public {name}() : base() {{ }} - - /// - /// Initialize a new instance wrapping - /// an existing instance. - /// This will change the values ""in-place"" i.e. operations on this class WILL modify the provided collection - /// - public {name}(ClientConfig config) : base(config) {{ }} - - /// - /// Initialize a new instance wrapping - /// an existing key/value pair collection. - /// This will change the values ""in-place"" i.e. operations on this class WILL modify the provided collection - /// - public {name}(IDictionary config) : base(config) {{ }} -"; - return codeText; - } - - static string createConsumerSpecific() - { - return - createClassConstructors("ConsumerConfig") + -@" - /// - /// Check if any properties have been set that have implications for - /// application logic and therefore shouldn't be set via external - /// configuration, independent of the code. Throw an ArgumentException - /// if so. 
- /// - public ConsumerConfig ThrowIfContainsNonUserConfigurable() - { - var toCheck = new string[] { ""enable.partition.eof"", ""partition.assignment.strategy"", ""enable.auto.commit"", ""enable.auto.offset.store"" }; - this.Where(kv => toCheck.Contains(kv.Key)).ToList() - .ForEach(kv => { throw new ArgumentException($""Consumer config property '{kv.Key}' is not user configurable.""); }); - return this; - } - - /// - /// A comma separated list of fields that may be optionally set - /// in - /// objects returned by the - /// - /// method. Disabling fields that you do not require will improve - /// throughput and reduce memory consumption. Allowed values: - /// headers, timestamp, topic, all, none - /// - /// default: all - /// importance: low - /// - public string ConsumeResultFields { set { this.SetObject(""dotnet.consumer.consume.result.fields"", value); } } - -"; - } - - static string createProducerSpecific() - { - return - createClassConstructors("ProducerConfig") + -@" - /// - /// Check if any properties have been set that have implications for - /// application logic and therefore shouldn't be set via external - /// configuration, independent of the code. Throw an ArgumentException - /// if so. - /// - /// There are currently no such Producer configuration properties - /// and this method will never throw. - /// - public ProducerConfig ThrowIfContainsNonUserConfigurable() - { - // noop - return this; - } - - /// - /// Specifies whether or not the producer should start a background poll - /// thread to receive delivery reports and event notifications. Generally, - /// this should be set to true. If set to false, you will need to call - /// the Poll function manually. - /// - /// default: true - /// importance: low - /// - public bool? EnableBackgroundPoll { get { return GetBool(""dotnet.producer.enable.background.poll""); } set { this.SetObject(""dotnet.producer.enable.background.poll"", value); } } - - /// - /// Specifies whether to enable notification of delivery reports. Typically - /// you should set this parameter to true. Set it to false for ""fire and - /// forget"" semantics and a small boost in performance. - /// - /// default: true - /// importance: low - /// - public bool? EnableDeliveryReports { get { return GetBool(""dotnet.producer.enable.delivery.reports""); } set { this.SetObject(""dotnet.producer.enable.delivery.reports"", value); } } - - /// - /// A comma separated list of fields that may be optionally set in delivery - /// reports. Disabling delivery report fields that you do not require will - /// improve maximum throughput and reduce memory usage. Allowed values: - /// key, value, timestamp, headers, status, all, none. - /// - /// default: all - /// importance: low - /// - public string DeliveryReportFields { get { return Get(""dotnet.producer.delivery.report.fields""); } set { this.SetObject(""dotnet.producer.delivery.report.fields"", value.ToString()); } } - -"; - } - - static string createAdminClientSpecific() - { - return createClassConstructors("AdminClientConfig") + -@" - /// - /// Check if any properties have been set that have implications for - /// application logic and therefore shouldn't be set via external - /// configuration, independent of the code. Throw an ArgumentException - /// if so. - /// - /// There are currently no such AdminClient configuration properties - /// and this method will never throw. 
- /// - public AdminClientConfig ThrowIfContainsNonUserConfigurable() - { - // noop - return this; - } - -"; - } - - static List extractAll(string configDoc) - { - var configLines = configDoc.Split('\n'); - - var props = new List(); - - bool parsingGlobal = true; - foreach (var line in configLines) - { - if (line.Contains("Topic configuration properties")) - { - parsingGlobal = false; - continue; - } - - var columns = SplitLine(line).ToArray(); - if (columns.Length != 6) { continue; } - if (columns[0].Contains("-----")) { continue; } - if (columns[0].Contains("Property")) { continue; } - - var prop = new PropertySpecification(); - prop.IsGlobal = parsingGlobal; - prop.Name = columns[0]; - prop.CPorA = columns[1]; - prop.Range = columns[2]; - prop.Default = columns[3].Replace("\\|", "|"); - prop.Importance = columns[4]; - - var desc = columns[5].Replace("\\|", "|"); - bool isAlias = desc.StartsWith("Alias"); - if (isAlias) - { - var firstIdx = desc.IndexOf('`') + 1; - prop.AliasFor = desc.Substring(firstIdx, desc.IndexOf('`', firstIdx) - desc.IndexOf('`') - 1); - } - else - { - string typePrefix = "
*Type: "; - if (desc.IndexOf(typePrefix) == -1) { throw new Exception($"Unexpected config description: {desc}"); } - prop.Description = desc.Substring(0, desc.IndexOf(typePrefix)).Trim(); - var beginIdx = desc.IndexOf(typePrefix) + typePrefix.Length; - prop.Type = parseType(desc.Substring(beginIdx, desc.LastIndexOf("*") - beginIdx)); - } - - props.Add(prop); - } - - return props; - } - - static IEnumerable SplitLine(string line) - { - if (string.IsNullOrWhiteSpace(line)) - yield break; - - int lastPipe = 0; - for (int i = 1; i < line.Length - 1; ++i) - { - if (line[i] == '|' && line[i - 1] == ' ' && line[i + 1] == ' ') - { - yield return line.Substring(lastPipe, i - lastPipe).Trim(); - lastPipe = i + 1; - } - } - yield return line.Substring(lastPipe + 1).Trim(); - } - - static List removeDuplicateTopicLevel(List props) - { - // remove topicLevel properties that are in both topic level and global. - var global = props.Where(p => p.IsGlobal).ToList(); - var topicLevel = props.Where(p => !p.IsGlobal).ToList(); - var removeTopicLevel = new List(); - foreach (var p in topicLevel) - { - if (global.Count(gp => gp.Name.Equals(p.Name)) > 0) { removeTopicLevel.Add(p.Name); } - } - props = topicLevel.Where(p => !removeTopicLevel.Contains(p.Name)).Concat(global).ToList(); - return props; - } - - static List linkAliased(List props) - { - // link up aliased properties. - var nonAlias = props.Where(p => p.AliasFor == null).ToList(); - var aliases = props.Where(p => p.AliasFor != null).ToList(); - foreach (var alias in aliases) - { - var toUpdate = nonAlias.Single(p => p.Name == alias.AliasFor && p.IsGlobal == alias.IsGlobal); - if (toUpdate.AliasFor != null) { throw new Exception("detected more than on alias for a property, not supported."); } - toUpdate.AliasFor = alias.Name; - } - props = nonAlias.ToList(); - return props; - } - - static List choosePreferredNames(List props) - { - return props.Select(p => { - if (p.AliasFor != null && MappingConfiguration.PreferredNames.ContainsKey(p.AliasFor)) - { - var af = p.AliasFor; - var n = p.Name; - p.Name = af; - p.AliasFor = n; - } - return p; - }).ToList(); - } - - static void PrintProps(IEnumerable props) - { - var props_ = props.ToArray(); - Array.Sort(props_); - Console.WriteLine(String.Join(" ", props_.Select(p => p.Name))); - } - - static async Task Main(string[] args) - { - if (args.Length != 1) - { - Console.WriteLine("usage: .. 
git-branch-name"); - return 1; - } - - string gitBranchName = args[0]; - string url = $"https://raw.githubusercontent.com/edenhill/librdkafka/{gitBranchName}/CONFIGURATION.md"; - var configDoc = await (await (new HttpClient()) - .GetAsync(url)) - .Content.ReadAsStringAsync(); - - var props = extractAll(configDoc); - var props2 = MappingConfiguration.RemoveLegacyOrNotRelevant(props); - var props3 = removeDuplicateTopicLevel(props2); - var props4 = props = linkAliased(props3); - var props5 = choosePreferredNames(props4); - - if (props.Count() == 0) - { - Console.WriteLine($"no properties found at url: {url}"); - return 1; - } - - Console.WriteLine($"property counts: [all: {props.Count()}, *: {props.Where(p => p.CPorA == "*").Count()}, C: {props.Where(p => p.CPorA == "C").Count()}, P: {props.Where(p => p.CPorA == "P").Count()}]."); - - var codeText = ""; - codeText += createFileHeader(gitBranchName); - codeText += createEnums(props.Where(p => p.Type == "enum" || MappingConfiguration.AdditionalEnums.Keys.Contains(p.Name)).ToList()); - codeText += MappingConfiguration.SaslMechanismEnumString; - codeText += MappingConfiguration.AcksEnumString; - codeText += createClassHeader("ClientConfig", "Configuration common to all clients", false); - codeText += createClassConstructors("ClientConfig"); - codeText += MappingConfiguration.SaslMechanismGetSetString; - codeText += MappingConfiguration.AcksGetSetString; - codeText += createProperties(props.Where(p => p.CPorA == "*")); - codeText += createClassFooter(); - codeText += createClassHeader("AdminClientConfig", "AdminClient configuration properties", true); - codeText += createAdminClientSpecific(); - codeText += createClassFooter(); - codeText += createClassHeader("ProducerConfig", "Producer configuration properties", true); - codeText += createProducerSpecific(); - codeText += createProperties(props.Where(p => p.CPorA == "P")); - codeText += createClassFooter(); - codeText += createClassHeader("ConsumerConfig", "Consumer configuration properties", true); - codeText += createConsumerSpecific(); - codeText += createProperties(props.Where(p => p.CPorA == "C")); - codeText += createClassFooter(); - codeText += createFileFooter(); - - if (!Directory.Exists("out")) { Directory.CreateDirectory("out"); } - File.WriteAllText("out/Config_gen.cs", codeText); - - return 0; - } - } -} +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Net.Http; +using System.Text.RegularExpressions; +using System.Threading.Tasks; + + +namespace ConfigGen +{ + internal class MappingConfiguration + { + /// + /// librdkafka _RK_C_S2I properties are automatically interpreted as enums, however + /// _RK_C_STR properties with discrete set of allowed values are not. Enum values for + /// these property types are specified here. + /// + /// + /// sasl.mechanisms is an awkward case because the values contain '-' characters (and + /// there are other values that contain the '_' character, so can't 1:1 map with this). + /// This type is defined by hand later. + /// + internal static Dictionary> AdditionalEnums => new Dictionary> + { + { "partition.assignment.strategy", new List { "range", "roundrobin", "cooperative-sticky" } }, + { "partitioner", new List { "random", "consistent", "consistent_random", "murmur2", "murmur2_random" } } + }; + + /// + /// A function that filters out properties from the librdkafka list that should + /// not be automatically extracted. 
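As an aside on the AdditionalEnums entry above: partition.assignment.strategy is a plain string property in librdkafka, so the generator only emits an enum for it because of this table. A hedged sketch of the resulting generated enum (illustrative, not part of the patch; it relies on the ConfigNameToDotnetName and EnumNameToDotnetName helpers defined further down, and the generated doc comments are elided):

    // Illustrative only: generated enum for the string property "partition.assignment.strategy".
    public enum PartitionAssignmentStrategy
    {
        Range,                // "range"
        RoundRobin,           // "roundrobin", via ConfigValueToEnumNameSubstitutes
        CooperativeSticky     // "cooperative-sticky", via ConfigValueToEnumNameSubstitutes
    }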
+ /// + internal static List RemoveLegacyOrNotRelevant(List props) + => props.Where(p => + { + // handled as a special case. + if (p.Name == "sasl.mechanisms") { return false; } + if (p.Name == "sasl.mechanism") { return false; } + if (p.Name == "acks") { return false; } + if (p.Name == "request.required.acks") { return false; } + // legacy + if (p.Name == "consume.callback.max.messages") { return false; } + if (p.Name == "offset.store.method") { return false; } + if (p.Name == "offset.store.path") { return false; } + if (p.Name == "offset.store.sync.interval.ms") { return false; } + if (p.Name == "builtin.features") { return false; } + if (p.Name == "produce.offset.report") { return false; } + if (p.Name == "delivery.report.only.error") { return false; } + if (p.Name == "topic.metadata.refresh.fast.cnt") { return false; } + if (p.Name == "reconnect.backoff.jitter.ms") { return false; } + if (p.Name == "socket.blocking.max.ms") { return false; } + if (p.Name == "auto.commit.interval.ms" && !p.IsGlobal) { return false; } + if (p.Name == "enable.auto.commit" && !p.IsGlobal) { return false; } + if (p.Name == "auto.commit.enable" && !p.IsGlobal) { return false; } + if (p.Name == "queuing.strategy") { return false; } + // other + if (p.Name.Contains("_")) { return false; } + return true; + }).ToList(); + + /// + /// A dictionary of synonym config properties. The key is included in the config + /// classes, the value is not. + /// + internal static Dictionary PreferredNames => + new Dictionary + { + { "bootstrap.servers", "metadata.broker.list" }, + { "max.in.flight", "max.in.flight.requests.per.connection" }, + { "max.partition.fetch.bytes", "fetch.message.max.bytes" }, + { "linger.ms", "queue.buffering.max.ms" }, + { "message.send.max.retries", "retries" }, + { "compression.type", "compression.codec" } + }; + + /// + /// SaslMechanism definition + /// + internal static string SaslMechanismEnumString => +@" + /// + /// SaslMechanism enum values + /// + public enum SaslMechanism + { + /// + /// GSSAPI + /// + Gssapi, + + /// + /// PLAIN + /// + Plain, + + /// + /// SCRAM-SHA-256 + /// + ScramSha256, + + /// + /// SCRAM-SHA-512 + /// + ScramSha512, + + /// + /// OAUTHBEARER + /// + OAuthBearer + } +"; + + /// + /// get/set for SaslMechanism. + /// + internal static string SaslMechanismGetSetString => +@" + /// + /// SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the name, you may not configure more than one mechanism. + /// + public SaslMechanism? 
SaslMechanism + { + get + { + var r = Get(""sasl.mechanism""); + if (r == null) { return null; } + if (r == ""GSSAPI"") { return Confluent.Kafka.SaslMechanism.Gssapi; } + if (r == ""PLAIN"") { return Confluent.Kafka.SaslMechanism.Plain; } + if (r == ""SCRAM-SHA-256"") { return Confluent.Kafka.SaslMechanism.ScramSha256; } + if (r == ""SCRAM-SHA-512"") { return Confluent.Kafka.SaslMechanism.ScramSha512; } + if (r == ""OAUTHBEARER"") { return Confluent.Kafka.SaslMechanism.OAuthBearer; } + throw new ArgumentException($""Unknown sasl.mechanism value {r}""); + } + set + { + if (value == null) { this.properties.Remove(""sasl.mechanism""); } + else if (value == Confluent.Kafka.SaslMechanism.Gssapi) { this.properties[""sasl.mechanism""] = ""GSSAPI""; } + else if (value == Confluent.Kafka.SaslMechanism.Plain) { this.properties[""sasl.mechanism""] = ""PLAIN""; } + else if (value == Confluent.Kafka.SaslMechanism.ScramSha256) { this.properties[""sasl.mechanism""] = ""SCRAM-SHA-256""; } + else if (value == Confluent.Kafka.SaslMechanism.ScramSha512) { this.properties[""sasl.mechanism""] = ""SCRAM-SHA-512""; } + else if (value == Confluent.Kafka.SaslMechanism.OAuthBearer) { this.properties[""sasl.mechanism""] = ""OAUTHBEARER""; } + else throw new ArgumentException($""Unknown sasl.mechanism value {value}""); + } + } + +"; + + + /// + /// SaslMechanism definition + /// + internal static string AcksEnumString => +@" + /// + /// Acks enum values + /// + public enum Acks : int + { + /// + /// None + /// + None = 0, + + /// + /// Leader + /// + Leader = 1, + + /// + /// All + /// + All = -1 + } +"; + + /// + /// get/set for Acks. + /// + internal static string AcksGetSetString => +@" + /// + /// This field indicates the number of acknowledgements the leader broker must receive from ISR brokers + /// before responding to the request: Zero=Broker does not send any response/ack to client, One=The + /// leader will write the record to its local log but will respond without awaiting full acknowledgement + /// from all followers. All=Broker will block until message is committed by all in sync replicas (ISRs). + /// If there are less than min.insync.replicas (broker configuration) in the ISR set the produce request + /// will fail. + /// + public Acks? Acks + { + get + { + var r = Get(""acks""); + if (r == null) { return null; } + if (r == ""0"") { return Confluent.Kafka.Acks.None; } + if (r == ""1"") { return Confluent.Kafka.Acks.Leader; } + if (r == ""-1"" || r == ""all"") { return Confluent.Kafka.Acks.All; } + return (Acks)(int.Parse(r)); + } + set + { + if (value == null) { this.properties.Remove(""acks""); } + else if (value == Confluent.Kafka.Acks.None) { this.properties[""acks""] = ""0""; } + else if (value == Confluent.Kafka.Acks.Leader) { this.properties[""acks""] = ""1""; } + else if (value == Confluent.Kafka.Acks.All) { this.properties[""acks""] = ""-1""; } + else { this.properties[""acks""] = ((int)value.Value).ToString(); } + } + } + +"; + + } + + + class PropertySpecification : IComparable + { + public PropertySpecification() { } + + public PropertySpecification(PropertySpecification other) + { + IsGlobal = other.IsGlobal; + Name = other.Name; + CPorA = other.CPorA; + Range = other.Range; + Importance = other.Importance; + Default = other.Default; + Description = other.Description; + Type = other.Type; + AliasFor = other.AliasFor; + } + + public bool IsGlobal { get; set; } + public string Name { get; set; } + public string CPorA { get; set; } // Consumer, Producer or All. 
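A hedged usage note on the Acks mapping above (illustrative, not part of the patch): the hand-written property translates between the librdkafka string value and the Acks enum in both directions.

    // Illustrative only: round-tripping the generated Acks property.
    var config = new ProducerConfig();
    config.Acks = Acks.All;     // stored internally as properties["acks"] = "-1"
    var acks = config.Acks;     // "-1" (or "all") reads back as Acks.All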
+ public string Range { get; set; } + public string Importance { get; set; } + public string Default { get; set; } + public string Description { get; set; } + public string Type { get; set; } + public string AliasFor { get; set; } + + public int CompareTo(object obj) + => Name.CompareTo(((PropertySpecification)obj).Name); + } + + class Program + { + static string parseType(string type) + { + if (type == "string") { return "string"; } + if (type == "integer") { return "int"; } + if (type == "boolean") { return "bool"; } + if (type == "enum value") { return "enum"; } + if (type == "CSV flags") { return "string"; } + if (type == "pattern list") { return "string"; } + if (type == "float") { return "double"; } + if (type == "pointer") { return "pointer"; } + if (type == "") { return "pointer"; } + if (type == "see dedicated API") { return "pointer"; } + throw new Exception($"unknown type '{type}'"); + } + + static string createFileHeader(string branch) + { + return +@"// *** Auto-generated from librdkafka " + branch + @" *** - do not modify manually. +// +// Copyright 2018-2022 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the 'License'); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an 'AS IS' BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; + + +namespace Confluent.Kafka +{ +"; + } + + static string createFileFooter() + { + return +@"} +"; + } + + static string ConfigNameToDotnetName(string configName) + => Regex.Replace( + string.Concat( + configName.Split('.').Select(p => char.ToUpper(p[0]) + p.Substring(1))), + "_[a-z]", + m => "_" + m.Value.Substring(1).ToUpper()); + + private static Dictionary ConfigValueToEnumNameSubstitutes = new Dictionary + { + { "sasl_plaintext", "SaslPlaintext" }, + { "sasl_ssl", "SaslSsl" }, + { "consistent_random", "ConsistentRandom" }, + { "murmur2_random", "Murmur2Random"}, + { "roundrobin", "RoundRobin" }, + { "cooperative-sticky", "CooperativeSticky"}, + { "read_uncommitted", "ReadUncommitted" }, + { "read_committed", "ReadCommitted" }, + { "use_all_dns_ips", "UseAllDnsIps"}, + { "resolve_canonical_bootstrap_servers_only", "ResolveCanonicalBootstrapServersOnly"} + }; + + static string EnumNameToDotnetName(string enumName) + { + if (ConfigValueToEnumNameSubstitutes.TryGetValue(enumName, out string substitute)) + { + return substitute; + } + + var result = char.ToUpper(enumName[0]) + enumName.Substring(1); + if (result.Contains('_')) + { + Console.WriteLine($"warning: enum value contains underscore (is not consistent with .net naming standards): {enumName}"); + } + + return result; + } + + static string createProperties(IEnumerable props) + { + var codeText = ""; + foreach (var prop in props) + { + if (prop.Type == "pointer") { continue; } + var type = (prop.Type == "enum" || MappingConfiguration.AdditionalEnums.Keys.Contains(prop.Name)) ? ConfigNameToDotnetName(prop.Name) : prop.Type; + var nullableType = type == "string" ? 
"string" : type + "?"; + + codeText += $" /// \n"; + codeText += $" /// {prop.Description}\n"; + codeText += $" ///\n"; + codeText += $" /// default: {(prop.Default == "" ? "''" : prop.Default)}\n"; + codeText += $" /// importance: {prop.Importance}\n"; + codeText += $" /// \n"; + codeText += $" public {nullableType} {ConfigNameToDotnetName(prop.Name)} {{ get {{ return "; + switch (type) + { + case "string": + codeText += $"Get(\"{prop.Name}\")"; + break; + case "int": + codeText += $"GetInt(\"{prop.Name}\")"; + break; + case "bool": + codeText += $"GetBool(\"{prop.Name}\")"; + break; + case "double": + codeText += $"GetDouble(\"{prop.Name}\")"; + break; + default: + codeText += $"({nullableType})GetEnum(typeof({type}), \"{prop.Name}\")"; + break; + } + codeText += $"; }} set {{ this.SetObject(\"{prop.Name}\", value); }} }}\n"; + codeText += $"\n"; + } + return codeText; + } + + static string createClassFooter() + { + return +@" } + +"; + } + + static string createEnums(List props) + { + var codeText = ""; + for (int j = 0; j < props.Count(); ++j) + { + var prop = props[j]; + List vs = null; + if (prop.Type == "string") + { + vs = MappingConfiguration.AdditionalEnums[prop.Name]; + } + else + { + vs = prop.Range.Split(',').Select(v => v.Trim()).ToList(); + if (prop.Name == "auto.offset.reset") + { + // Only expose the options allowed by the Java client. + vs = new List { "Latest", "Earliest", "Error" }; + } + } + if (j != 0) { codeText += "\n"; } + codeText += $" /// \n"; + codeText += $" /// {ConfigNameToDotnetName(prop.Name)} enum values\n"; + codeText += $" /// \n"; + codeText += $" public enum {ConfigNameToDotnetName(prop.Name)}\n"; + codeText += $" {{\n"; + for (int i = 0; i < vs.Count; ++i) + { + var v = vs[i]; + var nm = EnumNameToDotnetName(v); + codeText += $" /// \n"; + codeText += $" /// {nm}\n"; + codeText += $" /// \n"; + codeText += $" {nm}{(i == vs.Count - 1 ? "" : ",\n")}\n"; + } + codeText += $" }}\n"; + } + return codeText; + } + + static string createClassHeader(string name, string docs, bool derive) + { + var codeText = "\n"; + codeText += $" /// \n"; + codeText += $" /// {docs}\n"; + codeText += $" /// \n"; + codeText += $" public class {name}{(derive ? " : ClientConfig" : " : Config")}\n"; + codeText += $" {{\n"; + return codeText; + } + + static string createClassConstructors(string name) + { + var codeText = $@" + /// + /// Initialize a new empty instance. + /// + public {name}() : base() {{ }} + + /// + /// Initialize a new instance wrapping + /// an existing instance. + /// This will change the values ""in-place"" i.e. operations on this class WILL modify the provided collection + /// + public {name}(ClientConfig config) : base(config) {{ }} + + /// + /// Initialize a new instance wrapping + /// an existing key/value pair collection. + /// This will change the values ""in-place"" i.e. operations on this class WILL modify the provided collection + /// + public {name}(IDictionary config) : base(config) {{ }} +"; + return codeText; + } + + static string createConsumerSpecific() + { + return + createClassConstructors("ConsumerConfig") + +@" + /// + /// Check if any properties have been set that have implications for + /// application logic and therefore shouldn't be set via external + /// configuration, independent of the code. Throw an ArgumentException + /// if so. 
+ /// + public ConsumerConfig ThrowIfContainsNonUserConfigurable() + { + var toCheck = new string[] { ""enable.partition.eof"", ""partition.assignment.strategy"", ""enable.auto.commit"", ""enable.auto.offset.store"" }; + this.Where(kv => toCheck.Contains(kv.Key)).ToList() + .ForEach(kv => { throw new ArgumentException($""Consumer config property '{kv.Key}' is not user configurable.""); }); + return this; + } + + /// + /// A comma separated list of fields that may be optionally set + /// in + /// objects returned by the + /// + /// method. Disabling fields that you do not require will improve + /// throughput and reduce memory consumption. Allowed values: + /// headers, timestamp, topic, all, none + /// + /// default: all + /// importance: low + /// + public string ConsumeResultFields { set { this.SetObject(""dotnet.consumer.consume.result.fields"", value); } } + +"; + } + + static string createProducerSpecific() + { + return + createClassConstructors("ProducerConfig") + +@" + /// + /// Check if any properties have been set that have implications for + /// application logic and therefore shouldn't be set via external + /// configuration, independent of the code. Throw an ArgumentException + /// if so. + /// + /// There are currently no such Producer configuration properties + /// and this method will never throw. + /// + public ProducerConfig ThrowIfContainsNonUserConfigurable() + { + // noop + return this; + } + + /// + /// Specifies whether or not the producer should start a background poll + /// thread to receive delivery reports and event notifications. Generally, + /// this should be set to true. If set to false, you will need to call + /// the Poll function manually. + /// + /// default: true + /// importance: low + /// + public bool? EnableBackgroundPoll { get { return GetBool(""dotnet.producer.enable.background.poll""); } set { this.SetObject(""dotnet.producer.enable.background.poll"", value); } } + + /// + /// Specifies whether to enable notification of delivery reports. Typically + /// you should set this parameter to true. Set it to false for ""fire and + /// forget"" semantics and a small boost in performance. + /// + /// default: true + /// importance: low + /// + public bool? EnableDeliveryReports { get { return GetBool(""dotnet.producer.enable.delivery.reports""); } set { this.SetObject(""dotnet.producer.enable.delivery.reports"", value); } } + + /// + /// A comma separated list of fields that may be optionally set in delivery + /// reports. Disabling delivery report fields that you do not require will + /// improve maximum throughput and reduce memory usage. Allowed values: + /// key, value, timestamp, headers, status, all, none. + /// + /// default: all + /// importance: low + /// + public string DeliveryReportFields { get { return Get(""dotnet.producer.delivery.report.fields""); } set { this.SetObject(""dotnet.producer.delivery.report.fields"", value.ToString()); } } + +"; + } + + static string createAdminClientSpecific() + { + return createClassConstructors("AdminClientConfig") + +@" + /// + /// Check if any properties have been set that have implications for + /// application logic and therefore shouldn't be set via external + /// configuration, independent of the code. Throw an ArgumentException + /// if so. + /// + /// There are currently no such AdminClient configuration properties + /// and this method will never throw. 
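A hedged application-side sketch of the consumer variant of ThrowIfContainsNonUserConfigurable shown above (illustrative, not part of the patch; the broker address is made up and the usual usings are assumed):

    // Illustrative only: rejecting reserved keys injected from external configuration.
    var config = new ConsumerConfig(new Dictionary<string, string>
    {
        { "bootstrap.servers", "localhost:9092" },
        { "enable.auto.commit", "false" }          // one of the keys in toCheck above
    });
    config.ThrowIfContainsNonUserConfigurable();   // throws ArgumentException naming the offending key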
+ /// + public AdminClientConfig ThrowIfContainsNonUserConfigurable() + { + // noop + return this; + } + +"; + } + + static List extractAll(string configDoc) + { + var configLines = configDoc.Split('\n'); + + var props = new List(); + + bool parsingGlobal = true; + foreach (var line in configLines) + { + if (line.Contains("Topic configuration properties")) + { + parsingGlobal = false; + continue; + } + + var columns = SplitLine(line).ToArray(); + if (columns.Length != 6) { continue; } + if (columns[0].Contains("-----")) { continue; } + if (columns[0].Contains("Property")) { continue; } + + var prop = new PropertySpecification(); + prop.IsGlobal = parsingGlobal; + prop.Name = columns[0]; + prop.CPorA = columns[1]; + prop.Range = columns[2]; + prop.Default = columns[3].Replace("\\|", "|"); + prop.Importance = columns[4]; + + var desc = columns[5].Replace("\\|", "|"); + bool isAlias = desc.StartsWith("Alias"); + if (isAlias) + { + var firstIdx = desc.IndexOf('`') + 1; + prop.AliasFor = desc.Substring(firstIdx, desc.IndexOf('`', firstIdx) - desc.IndexOf('`') - 1); + } + else + { + string typePrefix = "
*Type: "; + if (desc.IndexOf(typePrefix) == -1) { throw new Exception($"Unexpected config description: {desc}"); } + prop.Description = desc.Substring(0, desc.IndexOf(typePrefix)).Trim(); + var beginIdx = desc.IndexOf(typePrefix) + typePrefix.Length; + prop.Type = parseType(desc.Substring(beginIdx, desc.LastIndexOf("*") - beginIdx)); + } + + props.Add(prop); + } + + return props; + } + + static IEnumerable SplitLine(string line) + { + if (string.IsNullOrWhiteSpace(line)) + yield break; + + int lastPipe = 0; + for (int i = 1; i < line.Length - 1; ++i) + { + if (line[i] == '|' && line[i - 1] == ' ' && line[i + 1] == ' ') + { + yield return line.Substring(lastPipe, i - lastPipe).Trim(); + lastPipe = i + 1; + } + } + yield return line.Substring(lastPipe + 1).Trim(); + } + + static List removeDuplicateTopicLevel(List props) + { + // remove topicLevel properties that are in both topic level and global. + var global = props.Where(p => p.IsGlobal).ToList(); + var topicLevel = props.Where(p => !p.IsGlobal).ToList(); + var removeTopicLevel = new List(); + foreach (var p in topicLevel) + { + if (global.Count(gp => gp.Name.Equals(p.Name)) > 0) { removeTopicLevel.Add(p.Name); } + } + props = topicLevel.Where(p => !removeTopicLevel.Contains(p.Name)).Concat(global).ToList(); + return props; + } + + static List linkAliased(List props) + { + // link up aliased properties. + var nonAlias = props.Where(p => p.AliasFor == null).ToList(); + var aliases = props.Where(p => p.AliasFor != null).ToList(); + foreach (var alias in aliases) + { + var toUpdate = nonAlias.Single(p => p.Name == alias.AliasFor && p.IsGlobal == alias.IsGlobal); + if (toUpdate.AliasFor != null) { throw new Exception("detected more than on alias for a property, not supported."); } + toUpdate.AliasFor = alias.Name; + } + props = nonAlias.ToList(); + return props; + } + + static List choosePreferredNames(List props) + { + return props.Select(p => + { + if (p.AliasFor != null && MappingConfiguration.PreferredNames.ContainsKey(p.AliasFor)) + { + var af = p.AliasFor; + var n = p.Name; + p.Name = af; + p.AliasFor = n; + } + return p; + }).ToList(); + } + + static void PrintProps(IEnumerable props) + { + var props_ = props.ToArray(); + Array.Sort(props_); + Console.WriteLine(String.Join(" ", props_.Select(p => p.Name))); + } + + static async Task Main(string[] args) + { + if (args.Length != 1) + { + Console.WriteLine("usage: .. 
git-branch-name"); + return 1; + } + + string gitBranchName = args[0]; + string url = $"https://raw.githubusercontent.com/edenhill/librdkafka/{gitBranchName}/CONFIGURATION.md"; + var configDoc = await (await (new HttpClient()) + .GetAsync(url)) + .Content.ReadAsStringAsync(); + + var props = extractAll(configDoc); + var props2 = MappingConfiguration.RemoveLegacyOrNotRelevant(props); + var props3 = removeDuplicateTopicLevel(props2); + var props4 = props = linkAliased(props3); + var props5 = choosePreferredNames(props4); + + if (props.Count() == 0) + { + Console.WriteLine($"no properties found at url: {url}"); + return 1; + } + + Console.WriteLine($"property counts: [all: {props.Count()}, *: {props.Where(p => p.CPorA == "*").Count()}, C: {props.Where(p => p.CPorA == "C").Count()}, P: {props.Where(p => p.CPorA == "P").Count()}]."); + + var codeText = ""; + codeText += createFileHeader(gitBranchName); + codeText += createEnums(props.Where(p => p.Type == "enum" || MappingConfiguration.AdditionalEnums.Keys.Contains(p.Name)).ToList()); + codeText += MappingConfiguration.SaslMechanismEnumString; + codeText += MappingConfiguration.AcksEnumString; + codeText += createClassHeader("ClientConfig", "Configuration common to all clients", false); + codeText += createClassConstructors("ClientConfig"); + codeText += MappingConfiguration.SaslMechanismGetSetString; + codeText += MappingConfiguration.AcksGetSetString; + codeText += createProperties(props.Where(p => p.CPorA == "*")); + codeText += createClassFooter(); + codeText += createClassHeader("AdminClientConfig", "AdminClient configuration properties", true); + codeText += createAdminClientSpecific(); + codeText += createClassFooter(); + codeText += createClassHeader("ProducerConfig", "Producer configuration properties", true); + codeText += createProducerSpecific(); + codeText += createProperties(props.Where(p => p.CPorA == "P")); + codeText += createClassFooter(); + codeText += createClassHeader("ConsumerConfig", "Consumer configuration properties", true); + codeText += createConsumerSpecific(); + codeText += createProperties(props.Where(p => p.CPorA == "C")); + codeText += createClassFooter(); + codeText += createFileFooter(); + + if (!Directory.Exists("out")) { Directory.CreateDirectory("out"); } + File.WriteAllText("out/Config_gen.cs", codeText); + + return 0; + } + } +} diff --git a/src/Confluent.Kafka/Admin/AccessControlEntry.cs b/src/Confluent.Kafka/Admin/AccessControlEntry.cs index 6acba071c..91d6f2d4e 100644 --- a/src/Confluent.Kafka/Admin/AccessControlEntry.cs +++ b/src/Confluent.Kafka/Admin/AccessControlEntry.cs @@ -59,13 +59,13 @@ public AccessControlEntryFilter ToFilter() PermissionType = PermissionType }; } - + /// /// A clone of the AccessControlEntry object /// public AccessControlEntry Clone() { - return (AccessControlEntry) MemberwiseClone(); + return (AccessControlEntry)MemberwiseClone(); } /// diff --git a/src/Confluent.Kafka/Admin/AccessControlEntryFilter.cs b/src/Confluent.Kafka/Admin/AccessControlEntryFilter.cs index 94d2edb6b..cea066ccf 100644 --- a/src/Confluent.Kafka/Admin/AccessControlEntryFilter.cs +++ b/src/Confluent.Kafka/Admin/AccessControlEntryFilter.cs @@ -43,13 +43,13 @@ public class AccessControlEntryFilter /// The permission type this access control entry filter matches. 
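Before the Admin hunks that follow, a quick orientation on the generator's Main above: it downloads CONFIGURATION.md for the given librdkafka branch, parses the property table (extractAll/SplitLine), filters, de-duplicates and de-aliases the properties, and writes out/Config_gen.cs. A hedged sketch of the input row shape and an invocation (illustrative, not part of the patch; the row text and branch name are made up):

    // Illustrative only: extractAll() keeps rows with six " | "-separated columns whose
    // description column ends in a "*Type: ...*" marker, e.g.
    //
    //   some.property | * | 0 .. 100 | 10 | low | What the property does. <br>*Type: integer*
    //
    // Running the generator:
    //   dotnet run -- v2.3.0
    //   => fetches https://raw.githubusercontent.com/edenhill/librdkafka/v2.3.0/CONFIGURATION.md
    //   => writes out/Config_gen.cs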
/// public AclPermissionType PermissionType { get; set; } - + /// /// A clone of the AccessControlEntryFilter object /// public AccessControlEntryFilter Clone() { - return (AccessControlEntryFilter) MemberwiseClone(); + return (AccessControlEntryFilter)MemberwiseClone(); } /// diff --git a/src/Confluent.Kafka/Admin/AclBinding.cs b/src/Confluent.Kafka/Admin/AclBinding.cs index e7d88bb08..ff29f417b 100644 --- a/src/Confluent.Kafka/Admin/AclBinding.cs +++ b/src/Confluent.Kafka/Admin/AclBinding.cs @@ -52,7 +52,7 @@ public AclBindingFilter ToFilter() /// public AclBinding Clone() { - var ret = (AclBinding) MemberwiseClone(); + var ret = (AclBinding)MemberwiseClone(); ret.Pattern = ret.Pattern.Clone(); ret.Entry = ret.Entry.Clone(); return ret; diff --git a/src/Confluent.Kafka/Admin/AclBindingFilter.cs b/src/Confluent.Kafka/Admin/AclBindingFilter.cs index f359cb2ce..2ce4446b3 100644 --- a/src/Confluent.Kafka/Admin/AclBindingFilter.cs +++ b/src/Confluent.Kafka/Admin/AclBindingFilter.cs @@ -32,13 +32,13 @@ public class AclBindingFilter /// The access control entry filter. /// public AccessControlEntryFilter EntryFilter { get; set; } - + /// /// A clone of the AclBindingFilter object /// public AclBindingFilter Clone() { - var ret = (AclBindingFilter) MemberwiseClone(); + var ret = (AclBindingFilter)MemberwiseClone(); ret.PatternFilter = ret.PatternFilter.Clone(); ret.EntryFilter = ret.EntryFilter.Clone(); return ret; diff --git a/src/Confluent.Kafka/Admin/AlterConfigsException.cs b/src/Confluent.Kafka/Admin/AlterConfigsException.cs index cc6ccafe8..3e29681b2 100644 --- a/src/Confluent.Kafka/Admin/AlterConfigsException.cs +++ b/src/Confluent.Kafka/Admin/AlterConfigsException.cs @@ -38,12 +38,12 @@ public AlterConfigsException(List results) : base(new Error(ErrorCode.Local_Partial, "An error occurred altering the following resources: [" + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.ConfigResource)) + - "]: [" + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Error)) + + "]: [" + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Error)) + "].")) { Results = results; } - + /// /// The result corresponding to all ConfigResources in the request /// (whether or not they were in error). At least one of these diff --git a/src/Confluent.Kafka/Admin/AlterConsumerGroupOffsetsReport.cs b/src/Confluent.Kafka/Admin/AlterConsumerGroupOffsetsReport.cs index b85bbf54b..9e9d9ab71 100644 --- a/src/Confluent.Kafka/Admin/AlterConsumerGroupOffsetsReport.cs +++ b/src/Confluent.Kafka/Admin/AlterConsumerGroupOffsetsReport.cs @@ -44,7 +44,8 @@ public class AlterConsumerGroupOffsetsReport /// /// Returns a human readable representation of this object. /// - public override string ToString() { + public override string ToString() + { var errString = Error.IsError ? Error.ToString() : ""; return $"{Group} [ {String.Join(", ", Partitions)} ] {errString}"; } diff --git a/src/Confluent.Kafka/Admin/AlterConsumerGroupOffsetsResult.cs b/src/Confluent.Kafka/Admin/AlterConsumerGroupOffsetsResult.cs index 248ac4ee8..1172309e6 100644 --- a/src/Confluent.Kafka/Admin/AlterConsumerGroupOffsetsResult.cs +++ b/src/Confluent.Kafka/Admin/AlterConsumerGroupOffsetsResult.cs @@ -38,7 +38,8 @@ public class AlterConsumerGroupOffsetsResult /// /// Returns a human readable representation of this object. 
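The Admin hunks from here on are predominantly cosmetic (cast spacing, foreach spacing, trailing whitespace, brace placement). One behavioural detail worth calling out from AclBinding.Clone above: MemberwiseClone is shallow, so Pattern and Entry are re-cloned to keep the copy independent. A hedged illustration (not part of the patch; binding is an assumed existing AclBinding):

    // Illustrative only: the deep clone leaves the original untouched.
    var copy = binding.Clone();
    copy.Entry.Operation = AclOperation.Read;   // does not affect binding.Entry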
/// - public override string ToString() { + public override string ToString() + { return $"{Group} [ {String.Join(", ", Partitions)} ]"; } } diff --git a/src/Confluent.Kafka/Admin/AlterUserScramCredentialsReport.cs b/src/Confluent.Kafka/Admin/AlterUserScramCredentialsReport.cs index 120865306..47b52cf5c 100644 --- a/src/Confluent.Kafka/Admin/AlterUserScramCredentialsReport.cs +++ b/src/Confluent.Kafka/Admin/AlterUserScramCredentialsReport.cs @@ -41,7 +41,7 @@ public class AlterUserScramCredentialsReport /// public override string ToString() { - return $"{{\"User\": {User.Quote()}, " + + return $"{{\"User\": {User.Quote()}, " + $"\"Error\": {Error.ToString().Quote()}}}"; } } diff --git a/src/Confluent.Kafka/Admin/ConfigResource.cs b/src/Confluent.Kafka/Admin/ConfigResource.cs index 4fd94180a..66061a8dd 100644 --- a/src/Confluent.Kafka/Admin/ConfigResource.cs +++ b/src/Confluent.Kafka/Admin/ConfigResource.cs @@ -60,7 +60,7 @@ public override bool Equals(object obj) /// public override int GetHashCode() // x by prime number is quick and gives decent distribution. - => Type.GetHashCode()*251 + Name.GetHashCode(); + => Type.GetHashCode() * 251 + Name.GetHashCode(); /// /// Tests whether ConfigResource instance a is equal to ConfigResource instance b. diff --git a/src/Confluent.Kafka/Admin/CreateAclReport.cs b/src/Confluent.Kafka/Admin/CreateAclReport.cs index 0e6456c02..b87a4a148 100644 --- a/src/Confluent.Kafka/Admin/CreateAclReport.cs +++ b/src/Confluent.Kafka/Admin/CreateAclReport.cs @@ -44,7 +44,7 @@ public override bool Equals(Object obj) { return false; } - var result = (CreateAclReport) obj; + var result = (CreateAclReport)obj; if (base.Equals(result)) return true; return Error == result.Error; } diff --git a/src/Confluent.Kafka/Admin/CreateAclsException.cs b/src/Confluent.Kafka/Admin/CreateAclsException.cs index 81b4a2e4b..dc6a108b7 100644 --- a/src/Confluent.Kafka/Admin/CreateAclsException.cs +++ b/src/Confluent.Kafka/Admin/CreateAclsException.cs @@ -65,7 +65,7 @@ public override bool Equals(Object obj) { return false; } - var exception = (CreateAclsException) obj; + var exception = (CreateAclsException)obj; if (base.Equals(exception)) return true; return this.Error == exception.Error && (this.Results?.SequenceEqual(exception.Results) ?? exception.Results == null); @@ -120,7 +120,7 @@ public override int GetHashCode() if (Error != null) hash ^= Error.GetHashCode(); if (Results != null) { - foreach(CreateAclReport result in Results) + foreach (CreateAclReport result in Results) { hash ^= result.GetHashCode(); } diff --git a/src/Confluent.Kafka/Admin/CreatePartitionsException.cs b/src/Confluent.Kafka/Admin/CreatePartitionsException.cs index 0a65bde4b..aabcd442d 100644 --- a/src/Confluent.Kafka/Admin/CreatePartitionsException.cs +++ b/src/Confluent.Kafka/Admin/CreatePartitionsException.cs @@ -43,7 +43,7 @@ public CreatePartitionsException(List results) { Results = results; } - + /// /// The result corresponding to all topics in the request /// (whether or not they were in error). At least one of these diff --git a/src/Confluent.Kafka/Admin/CreateTopicsOptions.cs b/src/Confluent.Kafka/Admin/CreateTopicsOptions.cs index c6a26a9b5..4d07d8802 100644 --- a/src/Confluent.Kafka/Admin/CreateTopicsOptions.cs +++ b/src/Confluent.Kafka/Admin/CreateTopicsOptions.cs @@ -52,4 +52,4 @@ public class CreateTopicsOptions /// public TimeSpan? 
OperationTimeout { get; set; } } -} \ No newline at end of file +} diff --git a/src/Confluent.Kafka/Admin/DeleteAclsException.cs b/src/Confluent.Kafka/Admin/DeleteAclsException.cs index f242ea957..5cd7f3c4e 100644 --- a/src/Confluent.Kafka/Admin/DeleteAclsException.cs +++ b/src/Confluent.Kafka/Admin/DeleteAclsException.cs @@ -63,7 +63,7 @@ public override bool Equals(Object obj) { return false; } - var exception = (DeleteAclsException) obj; + var exception = (DeleteAclsException)obj; if (base.Equals(exception)) return true; return Error == exception.Error && (Results?.SequenceEqual(exception.Results) ?? exception.Results == null); @@ -119,7 +119,7 @@ public override int GetHashCode() if (Error != null) hash ^= Error.GetHashCode(); if (Results != null) { - foreach(DeleteAclsReport report in Results) + foreach (DeleteAclsReport report in Results) { hash ^= report.GetHashCode(); } diff --git a/src/Confluent.Kafka/Admin/DeleteAclsReport.cs b/src/Confluent.Kafka/Admin/DeleteAclsReport.cs index 13898cb27..a6286455a 100644 --- a/src/Confluent.Kafka/Admin/DeleteAclsReport.cs +++ b/src/Confluent.Kafka/Admin/DeleteAclsReport.cs @@ -51,7 +51,7 @@ public override bool Equals(Object obj) { return false; } - var report = (DeleteAclsReport) obj; + var report = (DeleteAclsReport)obj; if (base.Equals(report)) return true; return Error == report.Error && (AclBindings == null ? report.AclBindings == null : @@ -107,7 +107,7 @@ public override int GetHashCode() if (Error != null) hash ^= Error.GetHashCode(); if (AclBindings != null) { - foreach(AclBinding aclBinding in AclBindings) + foreach (AclBinding aclBinding in AclBindings) { hash ^= aclBinding.GetHashCode(); } diff --git a/src/Confluent.Kafka/Admin/DeleteAclsResult.cs b/src/Confluent.Kafka/Admin/DeleteAclsResult.cs index 1cd1954a5..c739b03ea 100644 --- a/src/Confluent.Kafka/Admin/DeleteAclsResult.cs +++ b/src/Confluent.Kafka/Admin/DeleteAclsResult.cs @@ -45,7 +45,7 @@ public override bool Equals(object obj) { return false; } - var result = (DeleteAclsResult) obj; + var result = (DeleteAclsResult)obj; if (base.Equals(result)) return true; return AclBindings == null ? result.AclBindings == null : new HashSet(AclBindings).SetEquals(new HashSet(result.AclBindings)); @@ -99,7 +99,7 @@ public override int GetHashCode() int hash = 1; if (AclBindings != null) { - foreach(AclBinding aclBinding in AclBindings) + foreach (AclBinding aclBinding in AclBindings) { hash ^= aclBinding.GetHashCode(); } diff --git a/src/Confluent.Kafka/Admin/DeleteGroupOptions.cs b/src/Confluent.Kafka/Admin/DeleteGroupOptions.cs index 5e18a517c..60a411e8f 100644 --- a/src/Confluent.Kafka/Admin/DeleteGroupOptions.cs +++ b/src/Confluent.Kafka/Admin/DeleteGroupOptions.cs @@ -1,47 +1,47 @@ -// Copyright 2022 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using System; - - -namespace Confluent.Kafka.Admin -{ - /// - /// Options for the DeleteGroups method. 
- /// - public class DeleteGroupsOptions - { - /// - /// The overall request timeout, including broker lookup, request - /// transmission, operation time on broker, and response. If set - /// to null, the default request timeout for the AdminClient will - /// be used. - /// - /// Default: null - /// - public TimeSpan? RequestTimeout { get; set; } - - /// - /// The broker's operation timeout - the maximum time to wait for - /// DeleteRecordsAsync before returning a result to the application. - /// If set to null, will return immediately upon triggering record - /// deletion. - /// - /// Default: null - /// - public TimeSpan? OperationTimeout { get; set; } - } -} +// Copyright 2022 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +using System; + + +namespace Confluent.Kafka.Admin +{ + /// + /// Options for the DeleteGroups method. + /// + public class DeleteGroupsOptions + { + /// + /// The overall request timeout, including broker lookup, request + /// transmission, operation time on broker, and response. If set + /// to null, the default request timeout for the AdminClient will + /// be used. + /// + /// Default: null + /// + public TimeSpan? RequestTimeout { get; set; } + + /// + /// The broker's operation timeout - the maximum time to wait for + /// DeleteRecordsAsync before returning a result to the application. + /// If set to null, will return immediately upon triggering record + /// deletion. + /// + /// Default: null + /// + public TimeSpan? OperationTimeout { get; set; } + } +} diff --git a/src/Confluent.Kafka/Admin/DeleteGroupReport.cs b/src/Confluent.Kafka/Admin/DeleteGroupReport.cs index 15161ee49..0f4605ac5 100644 --- a/src/Confluent.Kafka/Admin/DeleteGroupReport.cs +++ b/src/Confluent.Kafka/Admin/DeleteGroupReport.cs @@ -1,34 +1,34 @@ -// Copyright 2022 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -namespace Confluent.Kafka.Admin -{ - /// - /// The result of a DeleteGroup request. - /// - public class DeleteGroupReport - { - /// - /// The group. - /// - public string Group { get; set; } - - /// - /// The error (or success) of the group relevant for the request. - /// - public Error Error { get; set; } - } -} +// Copyright 2022 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +namespace Confluent.Kafka.Admin +{ + /// + /// The result of a DeleteGroup request. + /// + public class DeleteGroupReport + { + /// + /// The group. + /// + public string Group { get; set; } + + /// + /// The error (or success) of the group relevant for the request. + /// + public Error Error { get; set; } + } +} diff --git a/src/Confluent.Kafka/Admin/DeleteGroupsException.cs b/src/Confluent.Kafka/Admin/DeleteGroupsException.cs index d5e9e2aa6..189db6d7c 100644 --- a/src/Confluent.Kafka/Admin/DeleteGroupsException.cs +++ b/src/Confluent.Kafka/Admin/DeleteGroupsException.cs @@ -1,54 +1,54 @@ -// Copyright 2022 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using System; -using System.Linq; -using System.Collections.Generic; - - -namespace Confluent.Kafka.Admin -{ - /// - /// Represents an error that occured during a delete groups request. - /// - public class DeleteGroupsException : KafkaException - { - /// - /// Initializes a new DeleteGroupsException. - /// - /// - /// The result corresponding to all groups in the request - /// (whether or not they were in error). At least one of these - /// results will be in error. - /// - public DeleteGroupsException(List results) - : base(new Error(ErrorCode.Local_Partial, - "An error occurred deleting groups: [" + - String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Group)) + - "]: [" + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Error)) + - "].")) - { - Results = results; - } - - /// - /// The result corresponding to all groups in the request - /// (whether or not they were in error). At least one of these - /// results will be in error. - /// - public List Results { get; } - } -} +// Copyright 2022 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. 
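The DeleteGroup* files in this stretch appear to be rewritten only to normalise line endings; their content is unchanged. A hedged application-side sketch of how the DeleteGroupsException re-added just below surfaces partial failures (illustrative, not part of the patch; group names are made up and adminClient is an assumed IAdminClient):

    // Illustrative only: per-group failures are reported via DeleteGroupsException.Results.
    try
    {
        await adminClient.DeleteGroupsAsync(new List<string> { "group-a", "group-b" });
    }
    catch (DeleteGroupsException e)
    {
        foreach (var report in e.Results.Where(r => r.Error.IsError))
            Console.WriteLine($"failed to delete group {report.Group}: {report.Error}");
    }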
+ +using System; +using System.Linq; +using System.Collections.Generic; + + +namespace Confluent.Kafka.Admin +{ + /// + /// Represents an error that occured during a delete groups request. + /// + public class DeleteGroupsException : KafkaException + { + /// + /// Initializes a new DeleteGroupsException. + /// + /// + /// The result corresponding to all groups in the request + /// (whether or not they were in error). At least one of these + /// results will be in error. + /// + public DeleteGroupsException(List results) + : base(new Error(ErrorCode.Local_Partial, + "An error occurred deleting groups: [" + + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Group)) + + "]: [" + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Error)) + + "].")) + { + Results = results; + } + + /// + /// The result corresponding to all groups in the request + /// (whether or not they were in error). At least one of these + /// results will be in error. + /// + public List Results { get; } + } +} diff --git a/src/Confluent.Kafka/Admin/DeleteTopicReport.cs b/src/Confluent.Kafka/Admin/DeleteTopicReport.cs index 46c7a9ab1..2605080f6 100644 --- a/src/Confluent.Kafka/Admin/DeleteTopicReport.cs +++ b/src/Confluent.Kafka/Admin/DeleteTopicReport.cs @@ -20,7 +20,7 @@ namespace Confluent.Kafka.Admin /// /// The result of a request to delete a specific topic. /// - public class DeleteTopicReport + public class DeleteTopicReport { /// /// The topic. diff --git a/src/Confluent.Kafka/Admin/DescribeAclsException.cs b/src/Confluent.Kafka/Admin/DescribeAclsException.cs index 37a355f7f..e3cd964e0 100644 --- a/src/Confluent.Kafka/Admin/DescribeAclsException.cs +++ b/src/Confluent.Kafka/Admin/DescribeAclsException.cs @@ -57,7 +57,7 @@ public override bool Equals(Object obj) { return false; } - var exception = (DescribeAclsException) obj; + var exception = (DescribeAclsException)obj; if (base.Equals(exception)) return true; return Error == exception.Error && Result == exception.Result; diff --git a/src/Confluent.Kafka/Admin/DescribeAclsReport.cs b/src/Confluent.Kafka/Admin/DescribeAclsReport.cs index 25e0cdb58..8760687cc 100644 --- a/src/Confluent.Kafka/Admin/DescribeAclsReport.cs +++ b/src/Confluent.Kafka/Admin/DescribeAclsReport.cs @@ -51,7 +51,7 @@ public override bool Equals(object obj) { return false; } - var report = (DescribeAclsReport) obj; + var report = (DescribeAclsReport)obj; if (base.Equals(report)) return true; return Error == report.Error && (AclBindings == null ? report.AclBindings == null : @@ -107,7 +107,7 @@ public override int GetHashCode() if (Error != null) hash ^= Error.GetHashCode(); if (AclBindings != null) { - foreach(AclBinding aclBinding in AclBindings) + foreach (AclBinding aclBinding in AclBindings) { hash ^= aclBinding.GetHashCode(); } diff --git a/src/Confluent.Kafka/Admin/DescribeAclsResult.cs b/src/Confluent.Kafka/Admin/DescribeAclsResult.cs index 53deef4d5..6d20f7a1a 100644 --- a/src/Confluent.Kafka/Admin/DescribeAclsResult.cs +++ b/src/Confluent.Kafka/Admin/DescribeAclsResult.cs @@ -45,7 +45,7 @@ public override bool Equals(Object obj) { return false; } - var result = (DescribeAclsResult) obj; + var result = (DescribeAclsResult)obj; if (base.Equals(result)) return true; return AclBindings == null ? 
result.AclBindings == null : new HashSet<AclBinding>(AclBindings).SetEquals(new HashSet<AclBinding>(result.AclBindings));
@@ -99,7 +99,7 @@ public override int GetHashCode()
             int hash = 1;
             if (AclBindings != null)
             {
-                foreach(AclBinding aclBinding in AclBindings)
+                foreach (AclBinding aclBinding in AclBindings)
                 {
                     hash ^= aclBinding.GetHashCode();
                 }
diff --git a/src/Confluent.Kafka/Admin/DescribeClusterResult.cs b/src/Confluent.Kafka/Admin/DescribeClusterResult.cs
index c1e2f87cb..20e367dae 100644
--- a/src/Confluent.Kafka/Admin/DescribeClusterResult.cs
+++ b/src/Confluent.Kafka/Admin/DescribeClusterResult.cs
@@ -69,7 +69,7 @@ public override string ToString()
                 ).ToList());
                 authorizedOperations = $"[{authorizedOperations}]";
             }
-            
+
             result.Append($"{{\"ClusterId\": {ClusterId.Quote()}");
             result.Append($", \"Controller\": {Controller?.ToString() ?? "null"}, \"Nodes\": [{nodes}]");
             result.Append($", \"AuthorizedOperations\": {authorizedOperations}}}");
diff --git a/src/Confluent.Kafka/Admin/DescribeConfigsException.cs b/src/Confluent.Kafka/Admin/DescribeConfigsException.cs
index c90074774..6e21fedd5 100644
--- a/src/Confluent.Kafka/Admin/DescribeConfigsException.cs
+++ b/src/Confluent.Kafka/Admin/DescribeConfigsException.cs
@@ -38,7 +38,7 @@ public DescribeConfigsException(List<DescribeConfigsReport> results)
             : base(new Error(ErrorCode.Local_Partial,
                 "An error occurred describing the following resources: [" +
                 String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.ConfigResource)) +
-                "]: [" + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Error)) + 
+                "]: [" + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Error)) +
                 "]."))
         {
             Results = results;
diff --git a/src/Confluent.Kafka/Admin/DescribeConsumerGroupsReport.cs b/src/Confluent.Kafka/Admin/DescribeConsumerGroupsReport.cs
index 9108c2ded..de13823d7 100644
--- a/src/Confluent.Kafka/Admin/DescribeConsumerGroupsReport.cs
+++ b/src/Confluent.Kafka/Admin/DescribeConsumerGroupsReport.cs
@@ -31,9 +31,11 @@ public class DescribeConsumerGroupsReport
         /// <summary>
         ///     Returns a human readable representation of this object.
         /// </summary>
-        public override string ToString() {
+        public override string ToString()
+        {
             string res = "Groups:\n";
-            foreach (ConsumerGroupDescription cgd in ConsumerGroupDescriptions) {
+            foreach (ConsumerGroupDescription cgd in ConsumerGroupDescriptions)
+            {
                 res += "\t" + cgd.ToString() + "\n";
             }
             return res;
diff --git a/src/Confluent.Kafka/Admin/DescribeConsumerGroupsResult.cs b/src/Confluent.Kafka/Admin/DescribeConsumerGroupsResult.cs
index 13883c10f..d93b8d9c8 100644
--- a/src/Confluent.Kafka/Admin/DescribeConsumerGroupsResult.cs
+++ b/src/Confluent.Kafka/Admin/DescribeConsumerGroupsResult.cs
@@ -30,9 +30,11 @@ public class DescribeConsumerGroupsResult
         /// <summary>
         ///     Returns a human readable representation of this object.
         /// </summary>
-        public override string ToString() {
+        public override string ToString()
+        {
             string res = "Groups:\n";
-            foreach (ConsumerGroupDescription cgd in ConsumerGroupDescriptions) {
+            foreach (ConsumerGroupDescription cgd in ConsumerGroupDescriptions)
+            {
                 res += "\t" + cgd.ToString() + "\n";
             }
             return res;
diff --git a/src/Confluent.Kafka/Admin/DescribeTopicsReport.cs b/src/Confluent.Kafka/Admin/DescribeTopicsReport.cs
index 9c0f14c9e..ae1134888 100644
--- a/src/Confluent.Kafka/Admin/DescribeTopicsReport.cs
+++ b/src/Confluent.Kafka/Admin/DescribeTopicsReport.cs
@@ -45,7 +45,7 @@ public override string ToString()
                 TopicDescriptions.Select(topicDescription =>
                     topicDescription.ToString()
                 ).ToList());
-            
+
             result.Append($"{{\"TopicDescriptions\": [{topicDescriptions}]}}");
             return result.ToString();
         }
diff --git a/src/Confluent.Kafka/Admin/DescribeTopicsResult.cs b/src/Confluent.Kafka/Admin/DescribeTopicsResult.cs
index 36c439929..5c34a2913 100644
--- a/src/Confluent.Kafka/Admin/DescribeTopicsResult.cs
+++ b/src/Confluent.Kafka/Admin/DescribeTopicsResult.cs
@@ -44,7 +44,7 @@ public override string ToString()
                 TopicDescriptions.Select(topicDescription =>
                     topicDescription.ToString()
                 ).ToList());
-            
+
             result.Append($"{{\"TopicDescriptions\": [{topicDescriptions}]}}");
             return result.ToString();
         }
diff --git a/src/Confluent.Kafka/Admin/DescribeUserScramCredentialsReport.cs b/src/Confluent.Kafka/Admin/DescribeUserScramCredentialsReport.cs
index be84edda8..38ac29397 100644
--- a/src/Confluent.Kafka/Admin/DescribeUserScramCredentialsReport.cs
+++ b/src/Confluent.Kafka/Admin/DescribeUserScramCredentialsReport.cs
@@ -43,9 +43,9 @@ public override string ToString()
             var result = new StringBuilder();
             result.Append(
                 "{\"UserScramCredentialsDescriptions\": [");
-            result.Append(string.Join(",",UserScramCredentialsDescriptions.Select(u => u.ToString())));
+            result.Append(string.Join(",", UserScramCredentialsDescriptions.Select(u => u.ToString())));
             result.Append("]}");
-            return result.ToString(); 
+            return result.ToString();
         }
     }
 }
diff --git a/src/Confluent.Kafka/Admin/DescribeUserScramCredentialsResult.cs b/src/Confluent.Kafka/Admin/DescribeUserScramCredentialsResult.cs
index 49b4a54d3..24fd8edee 100644
--- a/src/Confluent.Kafka/Admin/DescribeUserScramCredentialsResult.cs
+++ b/src/Confluent.Kafka/Admin/DescribeUserScramCredentialsResult.cs
@@ -42,9 +42,9 @@ public override string ToString()
             var result = new StringBuilder();
             result.Append(
                 "{\"UserScramCredentialsDescriptions\": [");
-            result.Append(string.Join(",",UserScramCredentialsDescriptions.Select(u => u.ToString())));
+            result.Append(string.Join(",", UserScramCredentialsDescriptions.Select(u => u.ToString())));
             result.Append("]}");
-            return result.ToString(); 
+            return result.ToString();
         }
     }
 }
diff --git a/src/Confluent.Kafka/Admin/IncrementalAlterConfigsException.cs b/src/Confluent.Kafka/Admin/IncrementalAlterConfigsException.cs
index 3cfdc680e..74793fafa 100644
--- a/src/Confluent.Kafka/Admin/IncrementalAlterConfigsException.cs
+++ b/src/Confluent.Kafka/Admin/IncrementalAlterConfigsException.cs
@@ -38,12 +38,12 @@ public IncrementalAlterConfigsException(List<IncrementalAlterConfigsReport> resu
             : base(new Error(ErrorCode.Local_Partial,
                 "An error occurred incremental altering the following resources: [" +
                 String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.ConfigResource)) +
-                "]: [" + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Error)) + 
+                "]: [" + String.Join(", ", results.Where(r => r.Error.IsError).Select(r => r.Error)) +
                 "]."))
         {
             Results = results;
         }
-        
+
         /// <summary>
         ///     The result corresponding to all ConfigResources in the request
         ///     (whether or not they were in error). At least one of these
diff --git a/src/Confluent.Kafka/Admin/ListConsumerGroupOffsetsReport.cs b/src/Confluent.Kafka/Admin/ListConsumerGroupOffsetsReport.cs
index 6547bddb0..58df214ef 100644
--- a/src/Confluent.Kafka/Admin/ListConsumerGroupOffsetsReport.cs
+++ b/src/Confluent.Kafka/Admin/ListConsumerGroupOffsetsReport.cs
@@ -44,7 +44,8 @@ public class ListConsumerGroupOffsetsReport
         /// <summary>
         ///     Returns a human readable representation of this object.
         /// </summary>
-        public override string ToString() {
+        public override string ToString()
+        {
             var errString = Error.IsError ? Error.ToString() : "";
             return $"{Group} [ {String.Join(", ", Partitions)} ] {errString}";
         }
diff --git a/src/Confluent.Kafka/Admin/ListConsumerGroupOffsetsResult.cs b/src/Confluent.Kafka/Admin/ListConsumerGroupOffsetsResult.cs
index 3db341e3d..1ce8c636d 100644
--- a/src/Confluent.Kafka/Admin/ListConsumerGroupOffsetsResult.cs
+++ b/src/Confluent.Kafka/Admin/ListConsumerGroupOffsetsResult.cs
@@ -38,7 +38,8 @@ public class ListConsumerGroupOffsetsResult
         /// <summary>
         ///     Returns a human readable representation of this object.
         /// </summary>
-        public override string ToString() {
+        public override string ToString()
+        {
             return $"{Group} [ {String.Join(", ", Partitions)} ]";
         }
     }
diff --git a/src/Confluent.Kafka/Admin/ListConsumerGroupsReport.cs b/src/Confluent.Kafka/Admin/ListConsumerGroupsReport.cs
index fd1ea92a7..ab0807175 100644
--- a/src/Confluent.Kafka/Admin/ListConsumerGroupsReport.cs
+++ b/src/Confluent.Kafka/Admin/ListConsumerGroupsReport.cs
@@ -35,14 +35,18 @@ public class ListConsumerGroupsReport
         /// <summary>
         ///     Returns a human readable representation of this object.
         /// </summary>
-        public override string ToString() {
+        public override string ToString()
+        {
             string res = "Groups:\n";
-            foreach (ConsumerGroupListing cgl in Valid) {
+            foreach (ConsumerGroupListing cgl in Valid)
+            {
                 res += "\t" + cgl.ToString() + "\n";
             }
-            if (Errors.Count != 0) {
+            if (Errors.Count != 0)
+            {
                 res += "Errors:\n";
-                foreach (Error err in Errors) {
+                foreach (Error err in Errors)
+                {
                     res += "\t" + err.ToString() + "\n";
                 }
             }
diff --git a/src/Confluent.Kafka/Admin/ListConsumerGroupsResult.cs b/src/Confluent.Kafka/Admin/ListConsumerGroupsResult.cs
index 71c9dd19d..490088752 100644
--- a/src/Confluent.Kafka/Admin/ListConsumerGroupsResult.cs
+++ b/src/Confluent.Kafka/Admin/ListConsumerGroupsResult.cs
@@ -30,9 +30,11 @@ public class ListConsumerGroupsResult
         /// <summary>
         ///     Returns a human readable representation of this object.
         /// </summary>
-        public override string ToString() {
+        public override string ToString()
+        {
             string res = "Groups:\n";
-            foreach (ConsumerGroupListing cgl in Valid) {
+            foreach (ConsumerGroupListing cgl in Valid)
+            {
                 res += "\t" + cgl.ToString() + "\n";
             }
             return res;
diff --git a/src/Confluent.Kafka/Admin/MemberAssignment.cs b/src/Confluent.Kafka/Admin/MemberAssignment.cs
index 464a9ce96..d6a8e0097 100644
--- a/src/Confluent.Kafka/Admin/MemberAssignment.cs
+++ b/src/Confluent.Kafka/Admin/MemberAssignment.cs
@@ -28,4 +28,4 @@ public class MemberAssignment
         /// </summary>
         public List<TopicPartition> TopicPartitions { get; set; }
     }
-}
\ No newline at end of file
+}
diff --git a/src/Confluent.Kafka/Admin/MemberDescription.cs b/src/Confluent.Kafka/Admin/MemberDescription.cs
index 7b3fdb372..57df0a5c0 100644
--- a/src/Confluent.Kafka/Admin/MemberDescription.cs
+++ b/src/Confluent.Kafka/Admin/MemberDescription.cs
@@ -49,7 +49,7 @@ public class MemberDescription
         ///     Member assignment.
         /// </summary>
         public MemberAssignment Assignment { get; set; }
-        
+
         /// <summary>
         ///     Returns a JSON representation of this object.
         /// </summary>
@@ -60,10 +60,10 @@ public override string ToString()
         {
             var result = new StringBuilder();
             var assignment = string.Join(",",
-                Assignment.TopicPartitions.Select(topicPartition => 
+                Assignment.TopicPartitions.Select(topicPartition =>
                     $"{{\"Topic\": {topicPartition.Topic.Quote()}, \"Partition\": {topicPartition.Partition.Value}}}"
                 ).ToList());
-            
+
             result.Append($"{{\"ClientId\": {ClientId.Quote()}");
             result.Append($", \"GroupInstanceId\": {GroupInstanceId.Quote()}, \"ConsumerId\": {ConsumerId.Quote()}");
             result.Append($", \"Host\": {Host.Quote()}, \"Assignment\": [{assignment}]}}");
diff --git a/src/Confluent.Kafka/Admin/OffsetSpec.cs b/src/Confluent.Kafka/Admin/OffsetSpec.cs
index 37ba7f485..e8dbbadac 100644
--- a/src/Confluent.Kafka/Admin/OffsetSpec.cs
+++ b/src/Confluent.Kafka/Admin/OffsetSpec.cs
@@ -26,7 +26,7 @@ public abstract class OffsetSpec
         private static EarliestSpec EarliestSpecInstance = new EarliestSpec();
         private static LatestSpec LatestSpecInstance = new LatestSpec();
         private static MaxTimestampSpec MaxTimestampSpecInstance = new MaxTimestampSpec();
-        
+
         /// <summary>
         ///     Used to retrieve the earliest offset available.
         /// </summary>
@@ -62,7 +62,8 @@ internal override long Value()
         ///     that could not correspond to the latest one as timestamps
         ///     can be specified client-side.
         /// </summary>
-        public class MaxTimestampSpec : OffsetSpec {
+        public class MaxTimestampSpec : OffsetSpec
+        {
             internal MaxTimestampSpec()
             {
             }
@@ -132,7 +133,7 @@ public static OffsetSpec MaxTimestamp()
         {
             return MaxTimestampSpecInstance;
         }
-        
+
         internal abstract long Value();
     }
 }
diff --git a/src/Confluent.Kafka/Admin/PartitionsSpecification.cs b/src/Confluent.Kafka/Admin/PartitionsSpecification.cs
index b89d779bd..bd035d369 100644
--- a/src/Confluent.Kafka/Admin/PartitionsSpecification.cs
+++ b/src/Confluent.Kafka/Admin/PartitionsSpecification.cs
@@ -28,7 +28,7 @@ public class PartitionsSpecification
         ///     The topic that the new partitions specification corresponds to.
         /// </summary>
         public string Topic { get; set; }
-        
+
         /// <summary>
         ///     The replica assignments for the new partitions, or null if the assignment
         ///     will be done by the controller. The outer list is indexed by the new
diff --git a/src/Confluent.Kafka/Admin/ResourcePattern.cs b/src/Confluent.Kafka/Admin/ResourcePattern.cs
index 732642b1f..ae3c614a6 100644
--- a/src/Confluent.Kafka/Admin/ResourcePattern.cs
+++ b/src/Confluent.Kafka/Admin/ResourcePattern.cs
@@ -58,7 +58,7 @@ public ResourcePatternFilter ToFilter()
         /// </summary>
         public ResourcePattern Clone()
         {
-            return (ResourcePattern) MemberwiseClone();
+            return (ResourcePattern)MemberwiseClone();
         }
 
         /// <summary>
diff --git a/src/Confluent.Kafka/Admin/ResourcePatternFilter.cs b/src/Confluent.Kafka/Admin/ResourcePatternFilter.cs
index 2041a197d..cd16e1c42 100644
--- a/src/Confluent.Kafka/Admin/ResourcePatternFilter.cs
+++ b/src/Confluent.Kafka/Admin/ResourcePatternFilter.cs
@@ -49,7 +49,7 @@ public class ResourcePatternFilter
         /// </summary>
         public ResourcePatternFilter Clone()
         {
-            return (ResourcePatternFilter) MemberwiseClone();
+            return (ResourcePatternFilter)MemberwiseClone();
         }
 
         /// <summary>
diff --git a/src/Confluent.Kafka/Admin/ResourcePatternType.cs b/src/Confluent.Kafka/Admin/ResourcePatternType.cs
index 9b77de7a4..1032c0576 100644
--- a/src/Confluent.Kafka/Admin/ResourcePatternType.cs
+++ b/src/Confluent.Kafka/Admin/ResourcePatternType.cs
@@ -41,7 +41,7 @@ public enum ResourcePatternType : int
         ///     Literal: A literal resource name
         /// </summary>
         Literal = 3,
-        
+
         /// <summary>
         ///     Prefixed: A prefixed resource name
         /// </summary>
diff --git a/src/Confluent.Kafka/Admin/ScramCredentialInfo.cs b/src/Confluent.Kafka/Admin/ScramCredentialInfo.cs
index e8635be60..cd703dc93 100644
--- a/src/Confluent.Kafka/Admin/ScramCredentialInfo.cs
+++ b/src/Confluent.Kafka/Admin/ScramCredentialInfo.cs
@@ -34,7 +34,7 @@ public class ScramCredentialInfo
         ///     The iterations of the ScramCredentialInfo
         /// </summary>
         public int Iterations { get; set; }
-        
+
         /// <summary>
         ///     Returns a JSON representation of the ScramCredentialInfo object.
         /// </summary>
diff --git a/src/Confluent.Kafka/Admin/TopicDescription.cs b/src/Confluent.Kafka/Admin/TopicDescription.cs
index 91eb9dcf9..b09f28927 100644
--- a/src/Confluent.Kafka/Admin/TopicDescription.cs
+++ b/src/Confluent.Kafka/Admin/TopicDescription.cs
@@ -35,13 +35,13 @@ public class TopicDescription
         /// <summary>
         ///     The topic Id.
         /// </summary>
-        public Uuid TopicId {get; set; }
+        public Uuid TopicId { get; set; }
 
         /// <summary>
         ///     Error, if any, of topic reported by the broker
         /// </summary>
         public Error Error { get; set; }
-        
+
         /// <summary>
         ///     Whether the topic is internal to Kafka.
         ///     An example of an internal topic is the offsets and group management topic: __consumer_offsets.
@@ -57,7 +57,7 @@ public class TopicDescription
         ///     AclOperation list (null if not requested or not supported).
         /// </summary>
         public List<AclOperation> AuthorizedOperations { get; set; }
-        
+
         /// <summary>
         ///     Returns a JSON representation of this object.
         /// </summary>
diff --git a/src/Confluent.Kafka/Admin/UserScramCredentialDeletion.cs b/src/Confluent.Kafka/Admin/UserScramCredentialDeletion.cs
index da4de4d9d..68dfcf6cb 100644
--- a/src/Confluent.Kafka/Admin/UserScramCredentialDeletion.cs
+++ b/src/Confluent.Kafka/Admin/UserScramCredentialDeletion.cs
@@ -47,7 +47,7 @@ public override string ToString()
                 ", \"Mechanism\": ");
             result.Append(Mechanism.ToString().Quote());
             result.Append("}");
-            return result.ToString(); 
+            return result.ToString();
         }
     }
 }
diff --git a/src/Confluent.Kafka/Admin/UserScramCredentialUpsertion.cs b/src/Confluent.Kafka/Admin/UserScramCredentialUpsertion.cs
index 0376e6058..4b787f1ad 100644
--- a/src/Confluent.Kafka/Admin/UserScramCredentialUpsertion.cs
+++ b/src/Confluent.Kafka/Admin/UserScramCredentialUpsertion.cs
@@ -29,12 +29,12 @@ public class UserScramCredentialUpsertion : UserScramCredentialAlteration
         ///     The mechanism and iterations.
         /// </summary>
         public ScramCredentialInfo ScramCredentialInfo { get; set; }
-        
+
         /// <summary>
         ///     Password to HMAC before storage
         /// </summary>
         public byte[] Password { get; set; }
-        
+
         /// <summary>
         ///     Salt to use. Will be generated randomly if null (optional)
         /// </summary>
@@ -58,7 +58,7 @@ public override string ToString()
             // Password and Salt aren't included to
             // avoid accidental leak.
             result.Append("}");
-            return result.ToString(); 
+            return result.ToString();
         }
     }
 }
diff --git a/src/Confluent.Kafka/Admin/UserScramCredentialsDescription.cs b/src/Confluent.Kafka/Admin/UserScramCredentialsDescription.cs
index c22888327..2b0aa30b9 100644
--- a/src/Confluent.Kafka/Admin/UserScramCredentialsDescription.cs
+++ b/src/Confluent.Kafka/Admin/UserScramCredentialsDescription.cs
@@ -41,7 +41,7 @@ public class UserScramCredentialsDescription
         /// <summary>
         ///     User Level Error
         /// </summary>
-        public Error Error {get;set;}
+        public Error Error { get; set; }
 
         /// <summary>
         ///     Returns a JSON representation of the UserScramCredentialsDescription object.
@@ -56,10 +56,10 @@ public override string ToString() "{\"User\": "); result.Append(User.Quote()); result.Append(", \"ScramCredentialInfos\": ["); - result.Append(string.Join(", ",ScramCredentialInfos.Select(u => u.ToString()))); + result.Append(string.Join(", ", ScramCredentialInfos.Select(u => u.ToString()))); result.Append("]"); result.Append($", \"Error\": {Error.ToString().Quote()}}}"); - return result.ToString(); + return result.ToString(); } } diff --git a/src/Confluent.Kafka/AdminClient.cs b/src/Confluent.Kafka/AdminClient.cs index ee9a07429..55f816916 100644 --- a/src/Confluent.Kafka/AdminClient.cs +++ b/src/Confluent.Kafka/AdminClient.cs @@ -55,13 +55,13 @@ private List extractTopicResults(IntPtr topicResultsPtr, int IntPtr[] topicResultsPtrArr = new IntPtr[topicResultsCount]; Marshal.Copy(topicResultsPtr, topicResultsPtrArr, 0, topicResultsCount); - return topicResultsPtrArr.Select(topicResultPtr => new CreateTopicReport - { - Topic = PtrToStringUTF8(Librdkafka.topic_result_name(topicResultPtr)), - Error = new Error( - Librdkafka.topic_result_error(topicResultPtr), + return topicResultsPtrArr.Select(topicResultPtr => new CreateTopicReport + { + Topic = PtrToStringUTF8(Librdkafka.topic_result_name(topicResultPtr)), + Error = new Error( + Librdkafka.topic_result_error(topicResultPtr), PtrToStringUTF8(Librdkafka.topic_result_error_string(topicResultPtr))) - }).ToList(); + }).ToList(); } private ConfigEntryResult extractConfigEntry(IntPtr configEntryPtr) @@ -74,7 +74,7 @@ private ConfigEntryResult extractConfigEntry(IntPtr configEntryPtr) Marshal.Copy(synonymsPtr, synonymsPtrArr, 0, (int)synonymsCount); synonyms = synonymsPtrArr .Select(synonymPtr => extractConfigEntry(synonymPtr)) - .Select(e => new ConfigSynonym { Name = e.Name, Value = e.Value, Source = e.Source } ) + .Select(e => new ConfigSynonym { Name = e.Name, Value = e.Value, Source = e.Source }) .ToList(); } @@ -113,7 +113,8 @@ private List extractResultConfigs(IntPtr configResourcesP .Select(configEntryPtr => extractConfigEntry(configEntryPtr)) .ToDictionary(e => e.Name); - result.Add(new DescribeConfigsReport { + result.Add(new DescribeConfigsReport + { ConfigResource = new ConfigResource { Name = resourceName, Type = resourceConfigType }, Entries = configEntries, Error = new Error(errorCode, errorReason) @@ -143,7 +144,7 @@ private List extractCreateAclReports(IntPtr aclResultsPtr, int Marshal.Copy(aclResultsPtr, aclsResultsPtrArr, 0, aclResultsCount); return aclsResultsPtrArr.Select(aclResultPtr => - new CreateAclReport + new CreateAclReport { Error = new Error(Librdkafka.acl_result_error(aclResultPtr), false) } @@ -152,7 +153,7 @@ private List extractCreateAclReports(IntPtr aclResultsPtr, int private List extractAclBindings(IntPtr aclBindingsPtr, int aclBindingsCnt) { - if (aclBindingsCnt == 0) { return new List {}; } + if (aclBindingsCnt == 0) { return new List { }; } IntPtr[] aclBindingsPtrArr = new IntPtr[aclBindingsCnt]; Marshal.Copy(aclBindingsPtr, aclBindingsPtrArr, 0, aclBindingsCnt); @@ -185,7 +186,7 @@ private DescribeAclsReport extractDescribeAclsReport(IntPtr resultPtr) return new DescribeAclsReport { Error = new Error(errCode, errString, false), - AclBindings = extractAclBindings(resultAcls, (int) resultAclCntPtr) + AclBindings = extractAclBindings(resultAcls, (int)resultAclCntPtr) }; } @@ -196,13 +197,14 @@ private List extractDeleteAclsReports(IntPtr resultPtr) IntPtr[] resultResponsesPtrArr = new IntPtr[(int)resultResponsesCntPtr]; Marshal.Copy(resultResponsesPtr, resultResponsesPtrArr, 0, 
(int)resultResponsesCntPtr); - return resultResponsesPtrArr.Select(resultResponsePtr => { + return resultResponsesPtrArr.Select(resultResponsePtr => + { var matchingAcls = Librdkafka.DeleteAcls_result_response_matching_acls( resultResponsePtr, out UIntPtr resultResponseAclCntPtr); - return new DeleteAclsReport + return new DeleteAclsReport { Error = new Error(Librdkafka.DeleteAcls_result_response_error(resultResponsePtr), false), - AclBindings = extractAclBindings(matchingAcls, (int) resultResponseAclCntPtr) + AclBindings = extractAclBindings(matchingAcls, (int)resultResponseAclCntPtr) }; }).ToList(); } @@ -230,12 +232,14 @@ private List extractListConsumerGroupOffsetsResu IntPtr[] resultGroupsPtrArr = new IntPtr[(int)resultCountPtr]; Marshal.Copy(resultGroupsPtr, resultGroupsPtrArr, 0, (int)resultCountPtr); - return resultGroupsPtrArr.Select(resultGroupPtr => { + return resultGroupsPtrArr.Select(resultGroupPtr => + { // Construct the TopicPartitionOffsetError list from internal list. var partitionsPtr = Librdkafka.group_result_partitions(resultGroupPtr); - return new ListConsumerGroupOffsetsReport { + return new ListConsumerGroupOffsetsReport + { Group = PtrToStringUTF8(Librdkafka.group_result_name(resultGroupPtr)), Error = new Error(Librdkafka.group_result_error(resultGroupPtr), false), Partitions = SafeKafkaHandle.GetTopicPartitionOffsetErrorList(partitionsPtr), @@ -249,12 +253,14 @@ private List extractAlterConsumerGroupOffsetsRe IntPtr[] resultGroupsPtrArr = new IntPtr[(int)resultCountPtr]; Marshal.Copy(resultGroupsPtr, resultGroupsPtrArr, 0, (int)resultCountPtr); - return resultGroupsPtrArr.Select(resultGroupPtr => { + return resultGroupsPtrArr.Select(resultGroupPtr => + { // Construct the TopicPartitionOffsetError list from internal list. var partitionsPtr = Librdkafka.group_result_partitions(resultGroupPtr); - return new AlterConsumerGroupOffsetsReport { + return new AlterConsumerGroupOffsetsReport + { Group = PtrToStringUTF8(Librdkafka.group_result_name(resultGroupPtr)), Error = new Error(Librdkafka.group_result_error(resultGroupPtr), false), Partitions = SafeKafkaHandle.GetTopicPartitionOffsetErrorList(partitionsPtr), @@ -275,7 +281,8 @@ private ListConsumerGroupsReport extractListConsumerGroupsResults(IntPtr resultP { IntPtr[] consumerGroupListingPtrArr = new IntPtr[(int)resultCountPtr]; Marshal.Copy(validResultsPtr, consumerGroupListingPtrArr, 0, (int)resultCountPtr); - result.Valid = consumerGroupListingPtrArr.Select(cglPtr => { + result.Valid = consumerGroupListingPtrArr.Select(cglPtr => + { return new ConsumerGroupListing() { GroupId = PtrToStringUTF8(Librdkafka.ConsumerGroupListing_group_id(cglPtr)), @@ -313,7 +320,8 @@ private DescribeConsumerGroupsReport extractDescribeConsumerGroupsResults(IntPtr IntPtr[] groupPtrArr = new IntPtr[(int)groupsCountPtr]; Marshal.Copy(groupsPtr, groupPtrArr, 0, (int)groupsCountPtr); - result.ConsumerGroupDescriptions = groupPtrArr.Select(groupPtr => { + result.ConsumerGroupDescriptions = groupPtrArr.Select(groupPtr => + { var coordinatorPtr = Librdkafka.ConsumerGroupDescription_coordinator(groupPtr); var coordinator = extractNode(coordinatorPtr); @@ -347,7 +355,7 @@ private DescribeConsumerGroupsReport extractDescribeConsumerGroupsResults(IntPtr var authorizedOperations = extractAuthorizedOperations( Librdkafka.ConsumerGroupDescription_authorized_operations(groupPtr, out UIntPtr authorizedOperationCount), - (int) authorizedOperationCount); + (int)authorizedOperationCount); var desc = new ConsumerGroupDescription() { @@ -374,42 +382,43 @@ 
private DescribeConsumerGroupsReport extractDescribeConsumerGroupsResults(IntPtr private DescribeUserScramCredentialsReport extractDescribeUserScramCredentialsResult(IntPtr eventPtr) { var report = new DescribeUserScramCredentialsReport(); - + var resultDescriptionsPtr = Librdkafka.DescribeUserScramCredentials_result_descriptions( eventPtr, out UIntPtr resultDescriptionCntPtr); IntPtr[] resultDescriptionsPtrArr = new IntPtr[(int)resultDescriptionCntPtr]; Marshal.Copy(resultDescriptionsPtr, resultDescriptionsPtrArr, 0, (int)resultDescriptionCntPtr); - + var descriptions = resultDescriptionsPtrArr.Select(resultDescriptionPtr => { var description = new UserScramCredentialsDescription(); - + var user = PtrToStringUTF8(Librdkafka.UserScramCredentialsDescription_user(resultDescriptionPtr)); IntPtr cError = Librdkafka.UserScramCredentialsDescription_error(resultDescriptionPtr); var error = new Error(cError, false); var scramCredentialInfos = new List(); - if (Librdkafka.error_code(cError)==0) + if (Librdkafka.error_code(cError) == 0) { int numCredentials = Librdkafka.UserScramCredentialsDescription_scramcredentialinfo_count(resultDescriptionPtr); - for(int j=0; j extractAlterUserScramCredentialsRe IntPtr[] resultResponsesPtrArr = new IntPtr[(int)resultResponsesCntPtr]; Marshal.Copy(resultResponsesPtr, resultResponsesPtrArr, 0, (int)resultResponsesCntPtr); - - return resultResponsesPtrArr.Select(resultResponsePtr => { - var user = + + return resultResponsesPtrArr.Select(resultResponsePtr => + { + var user = PtrToStringUTF8( Librdkafka.AlterUserScramCredentials_result_response_user(resultResponsePtr)); var error = new Error(Librdkafka.AlterUserScramCredentials_result_response_error(resultResponsePtr), false); - return new AlterUserScramCredentialsReport + return new AlterUserScramCredentialsReport { User = user, Error = error @@ -442,18 +452,19 @@ private List extractTopicPartitionInfo(IntPtr topicPartition { if (topicPartitionInfosCount == 0) return new List(); - + IntPtr[] topicPartitionInfos = new IntPtr[topicPartitionInfosCount]; Marshal.Copy(topicPartitionInfosPtr, topicPartitionInfos, 0, topicPartitionInfosCount); - return topicPartitionInfos.Select(topicPartitionInfoPtr => { + return topicPartitionInfos.Select(topicPartitionInfoPtr => + { return new TopicPartitionInfo { ISR = extractNodeList( Librdkafka.TopicPartitionInfo_isr(topicPartitionInfoPtr, out UIntPtr isrCount ), - (int) isrCount + (int)isrCount ), Leader = extractNode(Librdkafka.TopicPartitionInfo_leader(topicPartitionInfoPtr)), Partition = Librdkafka.TopicPartitionInfo_partition(topicPartitionInfoPtr), @@ -461,7 +472,7 @@ out UIntPtr isrCount Librdkafka.TopicPartitionInfo_replicas(topicPartitionInfoPtr, out UIntPtr replicasCount ), - (int) replicasCount + (int)replicasCount ), }; }).ToList(); @@ -493,8 +504,8 @@ private DescribeTopicsReport extractDescribeTopicsResults(IntPtr resultPtr) Librdkafka.TopicDescription_authorized_operations( topicPtr, out UIntPtr authorizedOperationCount), - (int) authorizedOperationCount); - + (int)authorizedOperationCount); + return new TopicDescription() { Name = topicName, @@ -505,7 +516,7 @@ private DescribeTopicsReport extractDescribeTopicsResults(IntPtr resultPtr) Partitions = extractTopicPartitionInfo( Librdkafka.TopicDescription_partitions(topicPtr, out UIntPtr partitionsCount), - (int) partitionsCount + (int)partitionsCount ), }; }).ToList(); @@ -523,15 +534,15 @@ private Uuid extractUuid(IntPtr uuidPtr) Librdkafka.Uuid_most_significant_bits(uuidPtr), 
Librdkafka.Uuid_least_significant_bits(uuidPtr) ); - } - + } + private Node extractNode(IntPtr nodePtr) { if (nodePtr == IntPtr.Zero) { return null; } - + return new Node() { Id = (int)Librdkafka.Node_id(nodePtr), @@ -558,11 +569,11 @@ private unsafe List extractAuthorizedOperations(IntPtr authorizedO { return null; } - + List authorizedOperations = new List(authorizedOperationCount); for (int i = 0; i < authorizedOperationCount; i++) { - AclOperation *aclOperationPtr = ((AclOperation *) authorizedOperationsPtr.ToPointer()) + i; + AclOperation* aclOperationPtr = ((AclOperation*)authorizedOperationsPtr.ToPointer()) + i; authorizedOperations.Add( *aclOperationPtr); } @@ -577,13 +588,13 @@ private DescribeClusterResult extractDescribeClusterResult(IntPtr resultPtr) var nodes = extractNodeList( Librdkafka.DescribeCluster_result_nodes(resultPtr, out UIntPtr nodeCount), - (int) nodeCount); + (int)nodeCount); List authorizedOperations = extractAuthorizedOperations( Librdkafka.DescribeCluster_result_authorized_operations( resultPtr, out UIntPtr authorizedOperationCount), - (int) authorizedOperationCount); + (int)authorizedOperationCount); return new DescribeClusterResult() { @@ -597,15 +608,15 @@ private DescribeClusterResult extractDescribeClusterResult(IntPtr resultPtr) private ListOffsetsReport extractListOffsetsReport(IntPtr resultPtr) { var resultInfosPtr = Librdkafka.ListOffsets_result_infos(resultPtr, out UIntPtr resulInfosCntPtr); - + IntPtr[] resultResponsesPtrArr = new IntPtr[(int)resulInfosCntPtr]; if ((int)resulInfosCntPtr > 0) { Marshal.Copy(resultInfosPtr, resultResponsesPtrArr, 0, (int)resulInfosCntPtr); - } - + } + ErrorCode reportErrorCode = ErrorCode.NoError; - var listOffsetsResultInfos = resultResponsesPtrArr.Select(resultResponsePtr => + var listOffsetsResultInfos = resultResponsesPtrArr.Select(resultResponsePtr => { long timestamp = Librdkafka.ListOffsetsResultInfo_timestamp(resultResponsePtr); IntPtr c_topic_partition = Librdkafka.ListOffsetsResultInfo_topic_partition(resultResponsePtr); @@ -679,7 +690,7 @@ private Task StartPollTask(CancellationToken ct) { if (errorCode != ErrorCode.NoError) { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource>)adminClientResult).TrySetException( new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; @@ -690,13 +701,13 @@ private Task StartPollTask(CancellationToken ct) if (result.Any(r => r.Error.IsError)) { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource>)adminClientResult).TrySetException( new CreateTopicsException(result))); } else { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource>)adminClientResult).TrySetResult(result)); } } @@ -741,8 +752,8 @@ private Task StartPollTask(CancellationToken ct) } var result = extractDeleteGroupsReport(eventPtr); - - if(result.Any(r => r.Error.IsError)) + + if (result.Any(r => r.Error.IsError)) { Task.Run(() => ((TaskCompletionSource>)adminClientResult).TrySetException( @@ -835,7 +846,7 @@ private Task StartPollTask(CancellationToken ct) else { Task.Run(() => - ((TaskCompletionSource>) adminClientResult).TrySetResult(result)); + ((TaskCompletionSource>)adminClientResult).TrySetResult(result)); } } break; @@ -863,12 +874,12 @@ private Task StartPollTask(CancellationToken ct) Task.Run(() => ((TaskCompletionSource>)adminClientResult).TrySetResult( result.Select(a => new DeleteRecordsResult - { - Topic = a.Topic, - Partition = a.Partition, - Offset = a.Offset, - Error = a.Error // internal, not exposed in success case. 
- }).ToList())); + { + Topic = a.Topic, + Partition = a.Partition, + Offset = a.Offset, + Error = a.Error // internal, not exposed in success case. + }).ToList())); } } break; @@ -909,7 +920,7 @@ private Task StartPollTask(CancellationToken ct) { if (errorCode != ErrorCode.NoError) { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource)adminClientResult).TrySetException( new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; @@ -921,13 +932,13 @@ private Task StartPollTask(CancellationToken ct) if (reports.Any(r => r.Error.IsError)) { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource)adminClientResult).TrySetException( new CreateAclsException(reports))); } else { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource)adminClientResult).TrySetResult(null)); } } @@ -936,7 +947,7 @@ private Task StartPollTask(CancellationToken ct) { if (errorCode != ErrorCode.NoError) { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource)adminClientResult).TrySetException( new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; @@ -946,7 +957,7 @@ private Task StartPollTask(CancellationToken ct) if (report.Error.IsError) { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource)adminClientResult).TrySetException( new DescribeAclsException(report))); } @@ -956,16 +967,16 @@ private Task StartPollTask(CancellationToken ct) { AclBindings = report.AclBindings }; - Task.Run(() => + Task.Run(() => ((TaskCompletionSource)adminClientResult).TrySetResult(result)); } } - break; + break; case Librdkafka.EventType.DeleteAcls_Result: { if (errorCode != ErrorCode.NoError) { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource>)adminClientResult).TrySetException( new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; @@ -975,54 +986,54 @@ private Task StartPollTask(CancellationToken ct) if (reports.Any(r => r.Error.IsError)) { - Task.Run(() => + Task.Run(() => ((TaskCompletionSource>)adminClientResult).TrySetException( new DeleteAclsException(reports))); } else { var results = reports.Select(report => new DeleteAclsResult - { - AclBindings = report.AclBindings - }).ToList(); - Task.Run(() => + { + AclBindings = report.AclBindings + }).ToList(); + Task.Run(() => ((TaskCompletionSource>)adminClientResult).TrySetResult(results)); } } - break; + break; case Librdkafka.EventType.AlterConsumerGroupOffsets_Result: - { - if (errorCode != ErrorCode.NoError) { - Task.Run(() => - ((TaskCompletionSource>)adminClientResult).TrySetException( - new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); + if (errorCode != ErrorCode.NoError) + { + Task.Run(() => + ((TaskCompletionSource>)adminClientResult).TrySetException( + new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; + } + var results = extractAlterConsumerGroupOffsetsResults(eventPtr); + if (results.Any(r => r.Error.IsError) || results.Any(r => r.Partitions.Any(p => p.Error.IsError))) + { + Task.Run(() => + ((TaskCompletionSource>)adminClientResult).TrySetException( + new AlterConsumerGroupOffsetsException(results))); + } + else + { + Task.Run(() => + ((TaskCompletionSource>)adminClientResult).TrySetResult( + results + .Select(r => new AlterConsumerGroupOffsetsResult() + { + Group = r.Group, + Partitions = r.Partitions + }) + .ToList() + )); + } + break; } - var results = extractAlterConsumerGroupOffsetsResults(eventPtr); - if (results.Any(r => r.Error.IsError) || results.Any(r => r.Partitions.Any(p => 
p.Error.IsError))) - { - Task.Run(() => - ((TaskCompletionSource>)adminClientResult).TrySetException( - new AlterConsumerGroupOffsetsException(results))); - } - else - { - Task.Run(() => - ((TaskCompletionSource>)adminClientResult).TrySetResult( - results - .Select(r => new AlterConsumerGroupOffsetsResult() - { - Group = r.Group, - Partitions = r.Partitions - }) - .ToList() - )); - } - break; - } - + case Librdkafka.EventType.IncrementalAlterConfigs_Result: { if (errorCode != ErrorCode.NoError) @@ -1047,10 +1058,10 @@ private Task StartPollTask(CancellationToken ct) else { Task.Run(() => - ((TaskCompletionSource>) adminClientResult).TrySetResult( + ((TaskCompletionSource>)adminClientResult).TrySetResult( result.Select(r => new IncrementalAlterConfigsResult { - ConfigResource = r.ConfigResource, + ConfigResource = r.ConfigResource, }).ToList() )); } @@ -1058,200 +1069,200 @@ private Task StartPollTask(CancellationToken ct) break; case Librdkafka.EventType.ListConsumerGroupOffsets_Result: - { - if (errorCode != ErrorCode.NoError) { - Task.Run(() => - ((TaskCompletionSource>)adminClientResult).TrySetException( - new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); + if (errorCode != ErrorCode.NoError) + { + Task.Run(() => + ((TaskCompletionSource>)adminClientResult).TrySetException( + new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; + } + var results = extractListConsumerGroupOffsetsResults(eventPtr); + if (results.Any(r => r.Error.IsError) || results.Any(r => r.Partitions.Any(p => p.Error.IsError))) + { + Task.Run(() => + ((TaskCompletionSource>)adminClientResult).TrySetException( + new ListConsumerGroupOffsetsException(results))); + } + else + { + Task.Run(() => + ((TaskCompletionSource>)adminClientResult).TrySetResult( + results + .Select(r => new ListConsumerGroupOffsetsResult() { Group = r.Group, Partitions = r.Partitions }) + .ToList() + )); + } + break; } - var results = extractListConsumerGroupOffsetsResults(eventPtr); - if (results.Any(r => r.Error.IsError) || results.Any(r => r.Partitions.Any(p => p.Error.IsError))) - { - Task.Run(() => - ((TaskCompletionSource>)adminClientResult).TrySetException( - new ListConsumerGroupOffsetsException(results))); - } - else - { - Task.Run(() => - ((TaskCompletionSource>)adminClientResult).TrySetResult( - results - .Select(r => new ListConsumerGroupOffsetsResult() { Group = r.Group, Partitions = r.Partitions }) - .ToList() - )); - } - break; - } case Librdkafka.EventType.ListConsumerGroups_Result: - { - if (errorCode != ErrorCode.NoError) { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); + if (errorCode != ErrorCode.NoError) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; + } + var results = extractListConsumerGroupsResults(eventPtr); + if (results.Errors.Count() != 0) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new ListConsumerGroupsException(results))); + } + else + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetResult( + new ListConsumerGroupsResult() { Valid = results.Valid } + )); + } + break; } - var results = extractListConsumerGroupsResults(eventPtr); - if (results.Errors.Count() != 0) - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new 
ListConsumerGroupsException(results))); - } - else - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetResult( - new ListConsumerGroupsResult() { Valid = results.Valid } - )); - } - break; - } case Librdkafka.EventType.DescribeConsumerGroups_Result: - { - if (errorCode != ErrorCode.NoError) { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); + if (errorCode != ErrorCode.NoError) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; + } + var results = extractDescribeConsumerGroupsResults(eventPtr); + if (results.ConsumerGroupDescriptions.Any(desc => desc.Error.IsError)) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new DescribeConsumerGroupsException(results))); + } + else + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetResult( + new DescribeConsumerGroupsResult() { ConsumerGroupDescriptions = results.ConsumerGroupDescriptions } + )); + } + break; } - var results = extractDescribeConsumerGroupsResults(eventPtr); - if (results.ConsumerGroupDescriptions.Any(desc => desc.Error.IsError)) - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new DescribeConsumerGroupsException(results))); - } - else - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetResult( - new DescribeConsumerGroupsResult() { ConsumerGroupDescriptions = results.ConsumerGroupDescriptions } - )); - } - break; - } case Librdkafka.EventType.DescribeUserScramCredentials_Result: - { - if (errorCode != ErrorCode.NoError) { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); + if (errorCode != ErrorCode.NoError) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; - } - var results = extractDescribeUserScramCredentialsResult(eventPtr); - if (results.UserScramCredentialsDescriptions.Any(desc => desc.Error.IsError)) - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new DescribeUserScramCredentialsException(results))); - } - else - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetResult( - new DescribeUserScramCredentialsResult() { UserScramCredentialsDescriptions = results.UserScramCredentialsDescriptions } - )); - } - break; + } + var results = extractDescribeUserScramCredentialsResult(eventPtr); + if (results.UserScramCredentialsDescriptions.Any(desc => desc.Error.IsError)) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new DescribeUserScramCredentialsException(results))); + } + else + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetResult( + new DescribeUserScramCredentialsResult() { UserScramCredentialsDescriptions = results.UserScramCredentialsDescriptions } + )); + } + break; - } + } case Librdkafka.EventType.AlterUserScramCredentials_Result: - { - if (errorCode != ErrorCode.NoError) { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); + if (errorCode != ErrorCode.NoError) + { + Task.Run(() => + 
((TaskCompletionSource)adminClientResult).TrySetException( + new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; - } - - var results = extractAlterUserScramCredentialsResults(eventPtr); + } - if (results.Any(r => r.Error.IsError)) - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new AlterUserScramCredentialsException(results))); - } - else - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetResult(null)); + var results = extractAlterUserScramCredentialsResults(eventPtr); + + if (results.Any(r => r.Error.IsError)) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new AlterUserScramCredentialsException(results))); + } + else + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetResult(null)); + } + + break; } - - break; - } case Librdkafka.EventType.DescribeTopics_Result: - { - if (errorCode != ErrorCode.NoError) { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); + if (errorCode != ErrorCode.NoError) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; + } + var results = extractDescribeTopicsResults(eventPtr); + if (results.TopicDescriptions.Any(desc => desc.Error.IsError)) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new DescribeTopicsException(results))); + } + else + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetResult( + new DescribeTopicsResult() { TopicDescriptions = results.TopicDescriptions } + )); + } + break; } - var results = extractDescribeTopicsResults(eventPtr); - if (results.TopicDescriptions.Any(desc => desc.Error.IsError)) - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new DescribeTopicsException(results))); - } - else - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetResult( - new DescribeTopicsResult() { TopicDescriptions = results.TopicDescriptions } - )); - } - break; - } case Librdkafka.EventType.DescribeCluster_Result: - { - if (errorCode != ErrorCode.NoError) { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); + if (errorCode != ErrorCode.NoError) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; + } + var res = extractDescribeClusterResult(eventPtr); + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetResult(res)); + break; } - var res = extractDescribeClusterResult(eventPtr); - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetResult(res)); - break; - } case Librdkafka.EventType.ListOffsets_Result: - { - if (errorCode != ErrorCode.NoError) { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); + if (errorCode != ErrorCode.NoError) + { + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetException( + new KafkaException(kafkaHandle.CreatePossiblyFatalError(errorCode, errorStr)))); break; + } + ListOffsetsReport report = extractListOffsetsReport(eventPtr); + if (report.Error.IsError) + { + Task.Run(() => + 
((TaskCompletionSource)adminClientResult).TrySetException( + new ListOffsetsException(report))); + } + else + { + var result = new ListOffsetsResult() { ResultInfos = report.ResultInfos }; + Task.Run(() => + ((TaskCompletionSource)adminClientResult).TrySetResult( + result)); + } + break; } - ListOffsetsReport report = extractListOffsetsReport(eventPtr); - if (report.Error.IsError) - { - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetException( - new ListOffsetsException(report))); - } - else - { - var result = new ListOffsetsResult() { ResultInfos = report.ResultInfos }; - Task.Run(() => - ((TaskCompletionSource)adminClientResult).TrySetResult( - result)); - } - break; - } default: // Should never happen. throw new InvalidOperationException($"Unknown result type: {type}"); @@ -1276,7 +1287,7 @@ private Task StartPollTask(CancellationToken ct) } } } - catch (OperationCanceledException) {} + catch (OperationCanceledException) { } }, ct, TaskCreationOptions.LongRunning, TaskScheduler.Default); @@ -1467,7 +1478,7 @@ private SafeKafkaHandle kafkaHandle /// internal AdminClient(Handle handle) { - Config.ExtractCancellationDelayMaxMs(new AdminClientConfig(), out this.cancellationDelayMaxMs); + Config.ExtractCancellationDelayMaxMs(new AdminClientConfig(), out this.cancellationDelayMaxMs); this.ownedClient = null; this.handle = handle; Init(); @@ -1490,9 +1501,9 @@ internal AdminClient(AdminClientBuilder builder) if (builder.StatisticsHandler != null) { producerBuilder.SetStatisticsHandler((_, stats) => builder.StatisticsHandler(this, stats)); } if (builder.OAuthBearerTokenRefreshHandler != null) { producerBuilder.SetOAuthBearerTokenRefreshHandler(builder.OAuthBearerTokenRefreshHandler); } this.ownedClient = producerBuilder.Build(); - + this.handle = new Handle - { + { Owner = this, LibrdkafkaHandle = ownedClient.Handle.LibrdkafkaHandle }; @@ -1768,7 +1779,8 @@ public Task DescribeClusterAsync(DescribeClusterOptions o /// /// Refer to /// - public Task ListOffsetsAsync(IEnumerable topicPartitionOffsetSpecs,ListOffsetsOptions options = null) { + public Task ListOffsetsAsync(IEnumerable topicPartitionOffsetSpecs, ListOffsetsOptions options = null) + { var completionSource = new TaskCompletionSource(); var gch = GCHandle.Alloc(completionSource); Handle.LibrdkafkaHandle.ListOffsets( diff --git a/src/Confluent.Kafka/AdminClientBuilder.cs b/src/Confluent.Kafka/AdminClientBuilder.cs index 6f0e9e712..0c4226c6a 100644 --- a/src/Confluent.Kafka/AdminClientBuilder.cs +++ b/src/Confluent.Kafka/AdminClientBuilder.cs @@ -34,7 +34,7 @@ public class AdminClientBuilder /// The configured error handler. /// internal protected Action ErrorHandler { get; set; } - + /// /// The configured log handler. /// diff --git a/src/Confluent.Kafka/ClientExtensions.cs b/src/Confluent.Kafka/ClientExtensions.cs index 79e685247..cb97f4bf3 100644 --- a/src/Confluent.Kafka/ClientExtensions.cs +++ b/src/Confluent.Kafka/ClientExtensions.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; namespace Confluent.Kafka { diff --git a/src/Confluent.Kafka/ConfigPropertyNames.cs b/src/Confluent.Kafka/ConfigPropertyNames.cs index 19572495e..1fab5704e 100644 --- a/src/Confluent.Kafka/ConfigPropertyNames.cs +++ b/src/Confluent.Kafka/ConfigPropertyNames.cs @@ -57,7 +57,7 @@ public static class Producer /// public const string DeliveryReportFields = "dotnet.producer.delivery.report.fields"; } - + /// /// Consumer specific configuration properties. 
@@ -86,6 +86,6 @@ public static class Consumer /// range: 1 <= dotnet.cancellation.delay.max.ms <= 10000 /// public const string CancellationDelayMaxMs = "dotnet.cancellation.delay.max.ms"; - + } } diff --git a/src/Confluent.Kafka/ConsumeResult.cs b/src/Confluent.Kafka/ConsumeResult.cs index c3763778b..56367abf3 100644 --- a/src/Confluent.Kafka/ConsumeResult.cs +++ b/src/Confluent.Kafka/ConsumeResult.cs @@ -106,7 +106,7 @@ public TValue Value { throw new MessageNullException(); } - + return Message.Value; } } @@ -140,7 +140,7 @@ public Headers Headers { throw new MessageNullException(); } - + return Message.Headers; } } diff --git a/src/Confluent.Kafka/Consumer.cs b/src/Confluent.Kafka/Consumer.cs index fae61ebed..37b2cc9de 100644 --- a/src/Confluent.Kafka/Consumer.cs +++ b/src/Confluent.Kafka/Consumer.cs @@ -118,7 +118,7 @@ private int StatisticsCallback(IntPtr rk, IntPtr json, UIntPtr json_len, IntPtr { handlerException = e; } - + return 0; // instruct librdkafka to immediately free the json ptr. } @@ -172,7 +172,7 @@ private void RebalanceCallback( { // Ensure registered handlers are never called as a side-effect of Dispose/Finalize (prevents deadlocks in common scenarios). if (kafkaHandle.IsClosed) - { + { // The RebalanceCallback should never be invoked as a side effect of Dispose. // If for some reason flow of execution gets here, something is badly wrong. // (and we have a closed librdkafka handle that is expecting an assign call...) @@ -335,8 +335,8 @@ private static byte[] KeyAsByteArray(rd_kafka_message msg) byte[] keyAsByteArray = null; if (msg.key != IntPtr.Zero) { - keyAsByteArray = new byte[(int) msg.key_len]; - Marshal.Copy(msg.key, keyAsByteArray, 0, (int) msg.key_len); + keyAsByteArray = new byte[(int)msg.key_len]; + Marshal.Copy(msg.key, keyAsByteArray, 0, (int)msg.key_len); } return keyAsByteArray; } @@ -346,8 +346,8 @@ private static byte[] ValueAsByteArray(rd_kafka_message msg) byte[] valAsByteArray = null; if (msg.val != IntPtr.Zero) { - valAsByteArray = new byte[(int) msg.len]; - Marshal.Copy(msg.val, valAsByteArray, 0, (int) msg.len); + valAsByteArray = new byte[(int)msg.len]; + Marshal.Copy(msg.val, valAsByteArray, 0, (int)msg.len); } return valAsByteArray; } @@ -446,7 +446,7 @@ public void StoreOffset(TopicPartitionOffset offset) { try { - kafkaHandle.StoreOffsets(new [] { offset }); + kafkaHandle.StoreOffsets(new[] { offset }); } catch (TopicPartitionOffsetException e) { @@ -475,7 +475,7 @@ public void Commit(ConsumeResult result) throw new InvalidOperationException("Attempt was made to commit offset corresponding to an empty consume result"); } - Commit(new [] { new TopicPartitionOffset(result.TopicPartition, result.Offset + 1, + Commit(new[] { new TopicPartitionOffset(result.TopicPartition, result.Offset + 1, result.LeaderEpoch) }); } @@ -607,7 +607,7 @@ protected virtual void Dispose(bool disposing) { // Calling Dispose a second or subsequent time should be a no-op. 
lock (disposeHasBeenCalledLockObj) - { + { if (disposeHasBeenCalled) { return; } disposeHasBeenCalled = true; } @@ -669,7 +669,8 @@ internal Consumer(ConsumerBuilder builder) case "headers": this.enableHeaderMarshaling = true; break; case "timestamp": this.enableTimestampMarshaling = true; break; case "topic": this.enableTopicNameMarshaling = true; break; - default: throw new ArgumentException( + default: + throw new ArgumentException( $"Unexpected consume result field name '{part}' in config value '{ConfigPropertyNames.Consumer.ConsumeResultFields}'."); } } @@ -831,7 +832,7 @@ public ConsumeResult Consume(int millisecondsTimeout) Librdkafka.message_headers(msgPtr, out IntPtr hdrsPtr); if (hdrsPtr != IntPtr.Zero) { - for (var i=0; ; ++i) + for (var i = 0; ; ++i) { var err = Librdkafka.header_get_all(hdrsPtr, (IntPtr)i, out IntPtr namep, out IntPtr valuep, out IntPtr sizep); if (err != ErrorCode.NoError) @@ -938,7 +939,7 @@ public ConsumeResult Consume(int millisecondsTimeout) ex); } - return new ConsumeResult + return new ConsumeResult { TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset, diff --git a/src/Confluent.Kafka/ConsumerBuilder.cs b/src/Confluent.Kafka/ConsumerBuilder.cs index 56549e783..0a5598a84 100644 --- a/src/Confluent.Kafka/ConsumerBuilder.cs +++ b/src/Confluent.Kafka/ConsumerBuilder.cs @@ -87,7 +87,7 @@ public class ConsumerBuilder /// internal protected Action, CommittedOffsets> OffsetsCommittedHandler { get; set; } - internal Consumer.Config ConstructBaseConfig(Consumer consumer) + internal Consumer.Config ConstructBaseConfig(Consumer consumer) { return new Consumer.Config { diff --git a/src/Confluent.Kafka/ConsumerGroupTopicPartitionOffsets.cs b/src/Confluent.Kafka/ConsumerGroupTopicPartitionOffsets.cs index 26a483686..fe82bfe2f 100644 --- a/src/Confluent.Kafka/ConsumerGroupTopicPartitionOffsets.cs +++ b/src/Confluent.Kafka/ConsumerGroupTopicPartitionOffsets.cs @@ -32,7 +32,8 @@ public class ConsumerGroupTopicPartitionOffsets /// /// A list of Kafka (topic, partition) tuples. /// - public ConsumerGroupTopicPartitionOffsets(string group, List topicPartitionOffsets) { + public ConsumerGroupTopicPartitionOffsets(string group, List topicPartitionOffsets) + { this.TopicPartitionOffsets = topicPartitionOffsets; this.Group = group; } diff --git a/src/Confluent.Kafka/ConsumerGroupTopicPartitions.cs b/src/Confluent.Kafka/ConsumerGroupTopicPartitions.cs index d5ac0444b..3832917b0 100644 --- a/src/Confluent.Kafka/ConsumerGroupTopicPartitions.cs +++ b/src/Confluent.Kafka/ConsumerGroupTopicPartitions.cs @@ -32,7 +32,8 @@ public class ConsumerGroupTopicPartitions /// /// A list of Kafka (topic, partition) tuples. /// - public ConsumerGroupTopicPartitions(string group, List topicPartitions) { + public ConsumerGroupTopicPartitions(string group, List topicPartitions) + { this.TopicPartitions = topicPartitions; this.Group = group; } diff --git a/src/Confluent.Kafka/DeliveryResult.cs b/src/Confluent.Kafka/DeliveryResult.cs index af7afeabe..6d9a4f839 100644 --- a/src/Confluent.Kafka/DeliveryResult.cs +++ b/src/Confluent.Kafka/DeliveryResult.cs @@ -64,7 +64,7 @@ public TopicPartitionOffset TopicPartitionOffset /// The persistence status of the message /// public PersistenceStatus Status { get; set; } - + /// /// The Kafka message. 
/// diff --git a/src/Confluent.Kafka/DependentAdminClientBuilder.cs b/src/Confluent.Kafka/DependentAdminClientBuilder.cs index 1c0d99bd0..71d6f6292 100644 --- a/src/Confluent.Kafka/DependentAdminClientBuilder.cs +++ b/src/Confluent.Kafka/DependentAdminClientBuilder.cs @@ -30,7 +30,7 @@ public class DependentAdminClientBuilder /// The configured client handle. /// public Handle Handle { get; set; } - + /// /// An underlying librdkafka client handle that the AdminClient. /// diff --git a/src/Confluent.Kafka/DependentProducerBuilder.cs b/src/Confluent.Kafka/DependentProducerBuilder.cs index d152ca652..7ca14ed21 100644 --- a/src/Confluent.Kafka/DependentProducerBuilder.cs +++ b/src/Confluent.Kafka/DependentProducerBuilder.cs @@ -33,7 +33,7 @@ public class DependentProducerBuilder /// The configured client handle. /// public Handle Handle { get; set; } - + /// /// The configured key serializer. /// diff --git a/src/Confluent.Kafka/Deserializers.cs b/src/Confluent.Kafka/Deserializers.cs index eab604d4b..9e47f0014 100644 --- a/src/Confluent.Kafka/Deserializers.cs +++ b/src/Confluent.Kafka/Deserializers.cs @@ -29,7 +29,7 @@ public static class Deserializers /// String (UTF8 encoded) deserializer. /// public static IDeserializer Utf8 = new Utf8Deserializer(); - + private class Utf8Deserializer : IDeserializer { public string Deserialize(ReadOnlySpan data, bool isNull, SerializationContext context) @@ -39,11 +39,11 @@ public string Deserialize(ReadOnlySpan data, bool isNull, SerializationCon return null; } - #if NETCOREAPP2_1 +#if NETCOREAPP2_1 return Encoding.UTF8.GetString(data); - #else - return Encoding.UTF8.GetString(data.ToArray()); - #endif +#else + return Encoding.UTF8.GetString(data.ToArray()); +#endif } } @@ -171,11 +171,11 @@ public float Deserialize(ReadOnlySpan data, bool isNull, SerializationCont } else { - #if NETCOREAPP2_1 +#if NETCOREAPP2_1 return BitConverter.ToSingle(data); - #else - return BitConverter.ToSingle(data.ToArray(), 0); - #endif +#else + return BitConverter.ToSingle(data.ToArray(), 0); +#endif } } } @@ -219,11 +219,11 @@ public double Deserialize(ReadOnlySpan data, bool isNull, SerializationCon } else { - #if NETCOREAPP2_1 +#if NETCOREAPP2_1 return BitConverter.ToDouble(data); - #else - return BitConverter.ToDouble(data.ToArray(), 0); - #endif +#else + return BitConverter.ToDouble(data.ToArray(), 0); +#endif } } } diff --git a/src/Confluent.Kafka/Error.cs b/src/Confluent.Kafka/Error.cs index 3000041fd..313b6172f 100644 --- a/src/Confluent.Kafka/Error.cs +++ b/src/Confluent.Kafka/Error.cs @@ -75,7 +75,7 @@ internal Error(IntPtr error, bool destroy) TxnRequiresAbort = false; return; } - + Code = Librdkafka.error_code(error); IsFatal = Librdkafka.error_is_fatal(error); TxnRequiresAbort = Librdkafka.error_txn_requires_abort(error); diff --git a/src/Confluent.Kafka/ErrorCode.cs b/src/Confluent.Kafka/ErrorCode.cs index 344ff6623..9029680f4 100644 --- a/src/Confluent.Kafka/ErrorCode.cs +++ b/src/Confluent.Kafka/ErrorCode.cs @@ -153,7 +153,7 @@ public enum ErrorCode /// /// Assigned partitions (rebalance_cb) /// - Local_AssignPartitions= -175, + Local_AssignPartitions = -175, /// /// Revoked partitions (rebalance_cb) @@ -329,7 +329,7 @@ public enum ErrorCode /// No offset to automatically reset to /// Local_AutoOffsetReset = -140, - + /// /// Partition log truncation detected /// @@ -670,8 +670,8 @@ public enum ErrorCode /// /// Specified Principal is not valid Owner/Renewer. 
/// - DelegationTokenOwnerMismatch = 63, - + DelegationTokenOwnerMismatch = 63, + /// /// Delegation Token requests are not allowed on this connection. /// diff --git a/src/Confluent.Kafka/Exceptions/TopicPartitionException.cs b/src/Confluent.Kafka/Exceptions/TopicPartitionException.cs index 293f0fd7b..79b6627ce 100644 --- a/src/Confluent.Kafka/Exceptions/TopicPartitionException.cs +++ b/src/Confluent.Kafka/Exceptions/TopicPartitionException.cs @@ -42,7 +42,7 @@ public TopicPartitionException(List results) { Results = results; } - + /// /// The result corresponding to all ConfigResources in the request /// (whether or not they were in error). At least one of these diff --git a/src/Confluent.Kafka/Exceptions/TopicPartitionOffsetException.cs b/src/Confluent.Kafka/Exceptions/TopicPartitionOffsetException.cs index b20176c13..ea66ca64a 100644 --- a/src/Confluent.Kafka/Exceptions/TopicPartitionOffsetException.cs +++ b/src/Confluent.Kafka/Exceptions/TopicPartitionOffsetException.cs @@ -42,7 +42,7 @@ public TopicPartitionOffsetException(List results) { Results = results; } - + /// /// The result corresponding to all ConfigResources in the request /// (whether or not they were in error). At least one of these diff --git a/src/Confluent.Kafka/Header.cs b/src/Confluent.Kafka/Header.cs index 27f9e9ee1..59de8f963 100644 --- a/src/Confluent.Kafka/Header.cs +++ b/src/Confluent.Kafka/Header.cs @@ -41,7 +41,7 @@ public byte[] GetValueBytes() { return val; } - + /// /// Create a new Header instance. /// @@ -53,7 +53,7 @@ public byte[] GetValueBytes() /// public Header(string key, byte[] value) { - if (key == null) + if (key == null) { throw new ArgumentNullException("Kafka message header key cannot be null."); } diff --git a/src/Confluent.Kafka/Headers.cs b/src/Confluent.Kafka/Headers.cs index 3fe66032b..c107e2b14 100644 --- a/src/Confluent.Kafka/Headers.cs +++ b/src/Confluent.Kafka/Headers.cs @@ -31,8 +31,8 @@ public class Headers : IEnumerable { private readonly List headers = new List(); - /// - /// Gets the underlying list of headers + /// + /// Gets the underlying list of headers /// public IReadOnlyList BackingList => headers; @@ -49,7 +49,7 @@ public class Headers : IEnumerable /// public void Add(string key, byte[] val) { - if (key == null) + if (key == null) { throw new ArgumentNullException("Kafka message header key cannot be null."); } @@ -107,7 +107,7 @@ public byte[] GetLastBytes(string key) /// public bool TryGetLastBytes(string key, out byte[] lastHeader) { - for (int i=headers.Count-1; i>=0; --i) + for (int i = headers.Count - 1; i >= 0; --i) { if (headers[i].Key == key) { @@ -141,13 +141,13 @@ public HeadersEnumerator(Headers headers) this.headers = headers; } - public object Current + public object Current => ((IEnumerator)this).Current; IHeader IEnumerator.Current => headers.headers[location]; - public void Dispose() {} + public void Dispose() { } public bool MoveNext() { diff --git a/src/Confluent.Kafka/IAdminClient.cs b/src/Confluent.Kafka/IAdminClient.cs index f351fe813..3a88bc3a4 100644 --- a/src/Confluent.Kafka/IAdminClient.cs +++ b/src/Confluent.Kafka/IAdminClient.cs @@ -536,7 +536,7 @@ Task DescribeConsumerGroupsAsync( /// public static class IAdminClientExtensions { - + /// /// Describes topics in the cluster. 
/// @@ -569,7 +569,7 @@ public static Task DescribeTopicsAsync( { if (adminClient is AdminClient) { - return ((AdminClient) adminClient).DescribeTopicsAsync( + return ((AdminClient)adminClient).DescribeTopicsAsync( topicCollection, options); } throw new NotImplementedException(); @@ -596,12 +596,12 @@ public static Task DescribeClusterAsync( { if (adminClient is AdminClient) { - return ((AdminClient) adminClient).DescribeClusterAsync( + return ((AdminClient)adminClient).DescribeClusterAsync( options); } throw new NotImplementedException(); } - + /// /// Enables to find the beginning offset, /// end offset as well as the offset matching a timestamp @@ -623,7 +623,7 @@ public static Task ListOffsetsAsync( { if (adminClient is AdminClient) { - return ((AdminClient) adminClient).ListOffsetsAsync( + return ((AdminClient)adminClient).ListOffsetsAsync( topicPartitionOffsets, options); } diff --git a/src/Confluent.Kafka/IConsumer.cs b/src/Confluent.Kafka/IConsumer.cs index 38c3a5a71..3e190e58d 100644 --- a/src/Confluent.Kafka/IConsumer.cs +++ b/src/Confluent.Kafka/IConsumer.cs @@ -179,7 +179,7 @@ public interface IConsumer : IClient /// e.g. ^myregex /// void Subscribe(string topic); - + /// /// Unsubscribe from the current subscription diff --git a/src/Confluent.Kafka/IConsumerExtensions.cs b/src/Confluent.Kafka/IConsumerExtensions.cs index 187df2c60..f80b18c13 100644 --- a/src/Confluent.Kafka/IConsumerExtensions.cs +++ b/src/Confluent.Kafka/IConsumerExtensions.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/Confluent.Kafka/IHeader.cs b/src/Confluent.Kafka/IHeader.cs index e2d76930e..7554f5f65 100644 --- a/src/Confluent.Kafka/IHeader.cs +++ b/src/Confluent.Kafka/IHeader.cs @@ -26,7 +26,7 @@ public interface IHeader /// The header key. /// string Key { get; } - + /// /// The serialized header value data. /// diff --git a/src/Confluent.Kafka/IProducer.cs b/src/Confluent.Kafka/IProducer.cs index cb3501cdc..b00ce7da6 100644 --- a/src/Confluent.Kafka/IProducer.cs +++ b/src/Confluent.Kafka/IProducer.cs @@ -182,7 +182,7 @@ void Produce( Message message, Action> deliveryHandler = null); - + /// /// Poll for callback events. /// @@ -250,7 +250,7 @@ void Produce( /// int Flush(TimeSpan timeout); - + /// /// Wait until all outstanding produce requests and /// delivery report callbacks are completed. 
diff --git a/src/Confluent.Kafka/Ignore.cs b/src/Confluent.Kafka/Ignore.cs index 7e5e13606..c7e480e15 100644 --- a/src/Confluent.Kafka/Ignore.cs +++ b/src/Confluent.Kafka/Ignore.cs @@ -23,6 +23,6 @@ namespace Confluent.Kafka /// public sealed class Ignore { - private Ignore() {} + private Ignore() { } } } diff --git a/src/Confluent.Kafka/Impl/LibRdKafka.cs b/src/Confluent.Kafka/Impl/LibRdKafka.cs index 2f542f7ef..1a6504d45 100644 --- a/src/Confluent.Kafka/Impl/LibRdKafka.cs +++ b/src/Confluent.Kafka/Impl/LibRdKafka.cs @@ -57,7 +57,7 @@ internal enum AdminOp Any = 0, CreateTopics = 1, DeleteTopics = 2, - CreatePartitions= 3, + CreatePartitions = 3, AlterConfigs = 4, DescribeConfigs = 5, DeleteRecords = 6, @@ -406,36 +406,36 @@ static bool SetDelegates(Type nativeMethodsClass) _ListConsumerGroupOffsets_result_groups = (_ListConsumerGroupOffsets_result_groups_delegate)methods.Single(m => m.Name == "rd_kafka_ListConsumerGroupOffsets_result_groups").CreateDelegate(typeof(_ListConsumerGroupOffsets_result_groups_delegate)); _ListConsumerGroupOffsets = (_ListConsumerGroupOffsets_delegate)methods.Single(m => m.Name == "rd_kafka_ListConsumerGroupOffsets").CreateDelegate(typeof(_ListConsumerGroupOffsets_delegate)); - _ListConsumerGroups = (_ListConsumerGroups_delegate)methods.Single(m => m.Name == "rd_kafka_ListConsumerGroups").CreateDelegate(typeof (_ListConsumerGroups_delegate)); - _ConsumerGroupListing_group_id = (_ConsumerGroupListing_group_id_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupListing_group_id").CreateDelegate(typeof (_ConsumerGroupListing_group_id_delegate)); - _ConsumerGroupListing_is_simple_consumer_group = (_ConsumerGroupListing_is_simple_consumer_group_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupListing_is_simple_consumer_group").CreateDelegate(typeof (_ConsumerGroupListing_is_simple_consumer_group_delegate)); - _ConsumerGroupListing_state = (_ConsumerGroupListing_state_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupListing_state").CreateDelegate(typeof (_ConsumerGroupListing_state_delegate)); - _ListConsumerGroups_result_valid = (_ListConsumerGroups_result_valid_delegate)methods.Single(m => m.Name == "rd_kafka_ListConsumerGroups_result_valid").CreateDelegate(typeof (_ListConsumerGroups_result_valid_delegate)); - _ListConsumerGroups_result_errors = (_ListConsumerGroups_result_errors_delegate)methods.Single(m => m.Name == "rd_kafka_ListConsumerGroups_result_errors").CreateDelegate(typeof (_ListConsumerGroups_result_errors_delegate)); - - _DescribeConsumerGroups = (_DescribeConsumerGroups_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeConsumerGroups").CreateDelegate(typeof (_DescribeConsumerGroups_delegate)); - _DescribeConsumerGroups_result_groups = (_DescribeConsumerGroups_result_groups_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeConsumerGroups_result_groups").CreateDelegate(typeof (_DescribeConsumerGroups_result_groups_delegate)); - _ConsumerGroupDescription_group_id = (_ConsumerGroupDescription_group_id_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_group_id").CreateDelegate(typeof (_ConsumerGroupDescription_group_id_delegate)); - _ConsumerGroupDescription_error = (_ConsumerGroupDescription_error_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_error").CreateDelegate(typeof (_ConsumerGroupDescription_error_delegate)); - _ConsumerGroupDescription_is_simple_consumer_group = (_ConsumerGroupDescription_is_simple_consumer_group_delegate)methods.Single(m => 
m.Name == "rd_kafka_ConsumerGroupDescription_is_simple_consumer_group").CreateDelegate(typeof (_ConsumerGroupDescription_is_simple_consumer_group_delegate)); - _ConsumerGroupDescription_partition_assignor = (_ConsumerGroupDescription_partition_assignor_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_partition_assignor").CreateDelegate(typeof (_ConsumerGroupDescription_partition_assignor_delegate)); - _ConsumerGroupDescription_state = (_ConsumerGroupDescription_state_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_state").CreateDelegate(typeof (_ConsumerGroupDescription_state_delegate)); - _ConsumerGroupDescription_coordinator = (_ConsumerGroupDescription_coordinator_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_coordinator").CreateDelegate(typeof (_ConsumerGroupDescription_coordinator_delegate)); - _ConsumerGroupDescription_member_count = (_ConsumerGroupDescription_member_count_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_member_count").CreateDelegate(typeof (_ConsumerGroupDescription_member_count_delegate)); - _ConsumerGroupDescription_authorized_operations = (_ConsumerGroupDescription_authorized_operations_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_authorized_operations").CreateDelegate(typeof (_ConsumerGroupDescription_authorized_operations_delegate)); - _ConsumerGroupDescription_member = (_ConsumerGroupDescription_member_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_member").CreateDelegate(typeof (_ConsumerGroupDescription_member_delegate)); - _MemberDescription_client_id = (_MemberDescription_client_id_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_client_id").CreateDelegate(typeof (_MemberDescription_client_id_delegate)); - _MemberDescription_group_instance_id = (_MemberDescription_group_instance_id_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_group_instance_id").CreateDelegate(typeof (_MemberDescription_group_instance_id_delegate)); - _MemberDescription_consumer_id = (_MemberDescription_consumer_id_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_consumer_id").CreateDelegate(typeof (_MemberDescription_consumer_id_delegate)); - _MemberDescription_host = (_MemberDescription_host_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_host").CreateDelegate(typeof (_MemberDescription_host_delegate)); - _MemberDescription_assignment = (_MemberDescription_assignment_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_assignment").CreateDelegate(typeof (_MemberDescription_assignment_delegate)); - _MemberAssignment_partitions = (_MemberAssignment_partitions_delegate)methods.Single(m => m.Name == "rd_kafka_MemberAssignment_partitions").CreateDelegate(typeof (_MemberAssignment_partitions_delegate)); - _Node_id = (_Node_id_delegate)methods.Single(m => m.Name == "rd_kafka_Node_id").CreateDelegate(typeof (_Node_id_delegate)); - _Node_host = (_Node_host_delegate)methods.Single(m => m.Name == "rd_kafka_Node_host").CreateDelegate(typeof (_Node_host_delegate)); - _Node_port = (_Node_port_delegate)methods.Single(m => m.Name == "rd_kafka_Node_port").CreateDelegate(typeof (_Node_port_delegate)); - _Node_rack = (_Node_rack_delegate)methods.Single(m => m.Name == "rd_kafka_Node_rack").CreateDelegate(typeof (_Node_rack_delegate)); - - _DescribeUserScramCredentials = (_DescribeUserScramCredentials_delegate)methods.Single(m => m.Name == 
"rd_kafka_DescribeUserScramCredentials").CreateDelegate(typeof (_DescribeUserScramCredentials_delegate)); + _ListConsumerGroups = (_ListConsumerGroups_delegate)methods.Single(m => m.Name == "rd_kafka_ListConsumerGroups").CreateDelegate(typeof(_ListConsumerGroups_delegate)); + _ConsumerGroupListing_group_id = (_ConsumerGroupListing_group_id_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupListing_group_id").CreateDelegate(typeof(_ConsumerGroupListing_group_id_delegate)); + _ConsumerGroupListing_is_simple_consumer_group = (_ConsumerGroupListing_is_simple_consumer_group_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupListing_is_simple_consumer_group").CreateDelegate(typeof(_ConsumerGroupListing_is_simple_consumer_group_delegate)); + _ConsumerGroupListing_state = (_ConsumerGroupListing_state_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupListing_state").CreateDelegate(typeof(_ConsumerGroupListing_state_delegate)); + _ListConsumerGroups_result_valid = (_ListConsumerGroups_result_valid_delegate)methods.Single(m => m.Name == "rd_kafka_ListConsumerGroups_result_valid").CreateDelegate(typeof(_ListConsumerGroups_result_valid_delegate)); + _ListConsumerGroups_result_errors = (_ListConsumerGroups_result_errors_delegate)methods.Single(m => m.Name == "rd_kafka_ListConsumerGroups_result_errors").CreateDelegate(typeof(_ListConsumerGroups_result_errors_delegate)); + + _DescribeConsumerGroups = (_DescribeConsumerGroups_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeConsumerGroups").CreateDelegate(typeof(_DescribeConsumerGroups_delegate)); + _DescribeConsumerGroups_result_groups = (_DescribeConsumerGroups_result_groups_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeConsumerGroups_result_groups").CreateDelegate(typeof(_DescribeConsumerGroups_result_groups_delegate)); + _ConsumerGroupDescription_group_id = (_ConsumerGroupDescription_group_id_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_group_id").CreateDelegate(typeof(_ConsumerGroupDescription_group_id_delegate)); + _ConsumerGroupDescription_error = (_ConsumerGroupDescription_error_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_error").CreateDelegate(typeof(_ConsumerGroupDescription_error_delegate)); + _ConsumerGroupDescription_is_simple_consumer_group = (_ConsumerGroupDescription_is_simple_consumer_group_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_is_simple_consumer_group").CreateDelegate(typeof(_ConsumerGroupDescription_is_simple_consumer_group_delegate)); + _ConsumerGroupDescription_partition_assignor = (_ConsumerGroupDescription_partition_assignor_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_partition_assignor").CreateDelegate(typeof(_ConsumerGroupDescription_partition_assignor_delegate)); + _ConsumerGroupDescription_state = (_ConsumerGroupDescription_state_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_state").CreateDelegate(typeof(_ConsumerGroupDescription_state_delegate)); + _ConsumerGroupDescription_coordinator = (_ConsumerGroupDescription_coordinator_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_coordinator").CreateDelegate(typeof(_ConsumerGroupDescription_coordinator_delegate)); + _ConsumerGroupDescription_member_count = (_ConsumerGroupDescription_member_count_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_member_count").CreateDelegate(typeof(_ConsumerGroupDescription_member_count_delegate)); + 
_ConsumerGroupDescription_authorized_operations = (_ConsumerGroupDescription_authorized_operations_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_authorized_operations").CreateDelegate(typeof(_ConsumerGroupDescription_authorized_operations_delegate)); + _ConsumerGroupDescription_member = (_ConsumerGroupDescription_member_delegate)methods.Single(m => m.Name == "rd_kafka_ConsumerGroupDescription_member").CreateDelegate(typeof(_ConsumerGroupDescription_member_delegate)); + _MemberDescription_client_id = (_MemberDescription_client_id_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_client_id").CreateDelegate(typeof(_MemberDescription_client_id_delegate)); + _MemberDescription_group_instance_id = (_MemberDescription_group_instance_id_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_group_instance_id").CreateDelegate(typeof(_MemberDescription_group_instance_id_delegate)); + _MemberDescription_consumer_id = (_MemberDescription_consumer_id_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_consumer_id").CreateDelegate(typeof(_MemberDescription_consumer_id_delegate)); + _MemberDescription_host = (_MemberDescription_host_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_host").CreateDelegate(typeof(_MemberDescription_host_delegate)); + _MemberDescription_assignment = (_MemberDescription_assignment_delegate)methods.Single(m => m.Name == "rd_kafka_MemberDescription_assignment").CreateDelegate(typeof(_MemberDescription_assignment_delegate)); + _MemberAssignment_partitions = (_MemberAssignment_partitions_delegate)methods.Single(m => m.Name == "rd_kafka_MemberAssignment_partitions").CreateDelegate(typeof(_MemberAssignment_partitions_delegate)); + _Node_id = (_Node_id_delegate)methods.Single(m => m.Name == "rd_kafka_Node_id").CreateDelegate(typeof(_Node_id_delegate)); + _Node_host = (_Node_host_delegate)methods.Single(m => m.Name == "rd_kafka_Node_host").CreateDelegate(typeof(_Node_host_delegate)); + _Node_port = (_Node_port_delegate)methods.Single(m => m.Name == "rd_kafka_Node_port").CreateDelegate(typeof(_Node_port_delegate)); + _Node_rack = (_Node_rack_delegate)methods.Single(m => m.Name == "rd_kafka_Node_rack").CreateDelegate(typeof(_Node_rack_delegate)); + + _DescribeUserScramCredentials = (_DescribeUserScramCredentials_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeUserScramCredentials").CreateDelegate(typeof(_DescribeUserScramCredentials_delegate)); _DescribeUserScramCredentials_result_descriptions = (_DescribeUserScramCredentials_result_descriptions_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeUserScramCredentials_result_descriptions").CreateDelegate(typeof(_DescribeUserScramCredentials_result_descriptions_delegate)); _UserScramCredentialsDescription_user = (_UserScramCredentialsDescription_user_delegate)methods.Single(m => m.Name == "rd_kafka_UserScramCredentialsDescription_user").CreateDelegate(typeof(_UserScramCredentialsDescription_user_delegate)); _UserScramCredentialsDescription_error = (_UserScramCredentialsDescription_error_delegate)methods.Single(m => m.Name == "rd_kafka_UserScramCredentialsDescription_error").CreateDelegate(typeof(_UserScramCredentialsDescription_error_delegate)); @@ -444,39 +444,39 @@ static bool SetDelegates(Type nativeMethodsClass) _ScramCredentialInfo_mechanism = (_ScramCredentialInfo_mechanism_delegate)methods.Single(m => m.Name == "rd_kafka_ScramCredentialInfo_mechanism").CreateDelegate(typeof(_ScramCredentialInfo_mechanism_delegate)); 
_ScramCredentialInfo_iterations = (_ScramCredentialInfo_iterations_delegate)methods.Single(m => m.Name == "rd_kafka_ScramCredentialInfo_iterations").CreateDelegate(typeof(_ScramCredentialInfo_iterations_delegate)); - _UserScramCredentialUpsertion_new = (_UserScramCredentialUpsertion_new_delegate)methods.Single(m => m.Name == "rd_kafka_UserScramCredentialUpsertion_new").CreateDelegate(typeof (_UserScramCredentialUpsertion_new_delegate)); - _UserScramCredentialDeletion_new = (_UserScramCredentialDeletion_new_delegate)methods.Single(m => m.Name == "rd_kafka_UserScramCredentialDeletion_new").CreateDelegate(typeof (_UserScramCredentialDeletion_new_delegate)); - _UserScramCredentialAlteration_destroy = (_UserScramCredentialAlteration_destroy_delegate)methods.Single(m => m.Name == "rd_kafka_UserScramCredentialAlteration_destroy").CreateDelegate(typeof (_UserScramCredentialAlteration_destroy_delegate)); - _AlterUserScramCredentials = (_AlterUserScramCredentials_delegate)methods.Single(m => m.Name == "rd_kafka_AlterUserScramCredentials").CreateDelegate(typeof (_AlterUserScramCredentials_delegate)); + _UserScramCredentialUpsertion_new = (_UserScramCredentialUpsertion_new_delegate)methods.Single(m => m.Name == "rd_kafka_UserScramCredentialUpsertion_new").CreateDelegate(typeof(_UserScramCredentialUpsertion_new_delegate)); + _UserScramCredentialDeletion_new = (_UserScramCredentialDeletion_new_delegate)methods.Single(m => m.Name == "rd_kafka_UserScramCredentialDeletion_new").CreateDelegate(typeof(_UserScramCredentialDeletion_new_delegate)); + _UserScramCredentialAlteration_destroy = (_UserScramCredentialAlteration_destroy_delegate)methods.Single(m => m.Name == "rd_kafka_UserScramCredentialAlteration_destroy").CreateDelegate(typeof(_UserScramCredentialAlteration_destroy_delegate)); + _AlterUserScramCredentials = (_AlterUserScramCredentials_delegate)methods.Single(m => m.Name == "rd_kafka_AlterUserScramCredentials").CreateDelegate(typeof(_AlterUserScramCredentials_delegate)); _AlterUserScramCredentials_result_responses = (_AlterUserScramCredentials_result_responses_delegate)methods.Single(m => m.Name == "rd_kafka_AlterUserScramCredentials_result_responses").CreateDelegate(typeof(_AlterUserScramCredentials_result_responses_delegate)); _AlterUserScramCredentials_result_response_user = (_AlterUserScramCredentials_result_response_user_delegate)methods.Single(m => m.Name == "rd_kafka_AlterUserScramCredentials_result_response_user").CreateDelegate(typeof(_AlterUserScramCredentials_result_response_user_delegate)); _AlterUserScramCredentials_result_response_error = (_AlterUserScramCredentials_result_response_error_delegate)methods.Single(m => m.Name == "rd_kafka_AlterUserScramCredentials_result_response_error").CreateDelegate(typeof(_AlterUserScramCredentials_result_response_error_delegate)); - - _ListOffsets = (_ListOffsets_delegate)methods.Single(m => m.Name == "rd_kafka_ListOffsets").CreateDelegate(typeof (_ListOffsets_delegate)); - _ListOffsets_result_infos = (_ListOffsets_result_infos_delegate)methods.Single(m => m.Name == "rd_kafka_ListOffsets_result_infos").CreateDelegate(typeof (_ListOffsets_result_infos_delegate)); - _ListOffsetsResultInfo_timestamp = (_ListOffsetsResultInfo_timestamp_delegate)methods.Single(m => m.Name == "rd_kafka_ListOffsetsResultInfo_timestamp").CreateDelegate(typeof (_ListOffsetsResultInfo_timestamp_delegate)); - _ListOffsetsResultInfo_topic_partition = (_ListOffsetsResultInfo_topic_partition_delegate)methods.Single(m => m.Name == 
"rd_kafka_ListOffsetsResultInfo_topic_partition").CreateDelegate(typeof (_ListOffsetsResultInfo_topic_partition_delegate)); - - _DescribeTopics = (_DescribeTopics_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeTopics").CreateDelegate(typeof (_DescribeTopics_delegate)); - _DescribeTopics_result_topics = (_DescribeTopics_result_topics_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeTopics_result_topics").CreateDelegate(typeof (_DescribeTopics_result_topics_delegate)); - _TopicCollection_of_topic_names = (_TopicCollection_of_topic_names_delegate)methods.Single(m => m.Name == "rd_kafka_TopicCollection_of_topic_names").CreateDelegate(typeof (_TopicCollection_of_topic_names_delegate)); - _TopicCollection_destroy = (_TopicCollection_destroy_delegate)methods.Single(m => m.Name == "rd_kafka_TopicCollection_destroy").CreateDelegate(typeof (_TopicCollection_destroy_delegate)); - _TopicDescription_error = (_TopicDescription_error_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_error").CreateDelegate(typeof (_TopicDescription_error_delegate)); - _TopicDescription_name = (_TopicDescription_name_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_name").CreateDelegate(typeof (_TopicDescription_name_delegate)); - _TopicDescription_topic_id = (_TopicDescription_topic_id_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_topic_id").CreateDelegate(typeof (_TopicDescription_topic_id_delegate)); - _TopicDescription_partitions = (_TopicDescription_partitions_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_partitions").CreateDelegate(typeof (_TopicDescription_partitions_delegate)); - _TopicDescription_is_internal = (_TopicDescription_is_internal_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_is_internal").CreateDelegate(typeof (_TopicDescription_is_internal_delegate)); - _TopicDescription_authorized_operations = (_TopicDescription_authorized_operations_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_authorized_operations").CreateDelegate(typeof (_TopicDescription_authorized_operations_delegate)); - _TopicPartitionInfo_isr = (_TopicPartitionInfo_isr_delegate)methods.Single(m => m.Name == "rd_kafka_TopicPartitionInfo_isr").CreateDelegate(typeof (_TopicPartitionInfo_isr_delegate)); - _TopicPartitionInfo_leader = (_TopicPartitionInfo_leader_delegate)methods.Single(m => m.Name == "rd_kafka_TopicPartitionInfo_leader").CreateDelegate(typeof (_TopicPartitionInfo_leader_delegate)); - _TopicPartitionInfo_partition = (_TopicPartitionInfo_partition_delegate)methods.Single(m => m.Name == "rd_kafka_TopicPartitionInfo_partition").CreateDelegate(typeof (_TopicPartitionInfo_partition_delegate)); - _TopicPartitionInfo_replicas = (_TopicPartitionInfo_replicas_delegate)methods.Single(m => m.Name == "rd_kafka_TopicPartitionInfo_replicas").CreateDelegate(typeof (_TopicPartitionInfo_replicas_delegate)); - - _DescribeCluster = (_DescribeCluster_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster").CreateDelegate(typeof (_DescribeCluster_delegate)); - _DescribeCluster_result_nodes = (_DescribeCluster_result_nodes_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster_result_nodes").CreateDelegate(typeof (_DescribeCluster_result_nodes_delegate)); - _DescribeCluster_result_authorized_operations = (_DescribeCluster_result_authorized_operations_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster_result_authorized_operations").CreateDelegate(typeof 
(_DescribeCluster_result_authorized_operations_delegate)); - _DescribeCluster_result_controller = (_DescribeCluster_result_controller_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster_result_controller").CreateDelegate(typeof (_DescribeCluster_result_controller_delegate)); - _DescribeCluster_result_cluster_id = (_DescribeCluster_result_cluster_id_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster_result_cluster_id").CreateDelegate(typeof (_DescribeCluster_result_cluster_id_delegate)); + + _ListOffsets = (_ListOffsets_delegate)methods.Single(m => m.Name == "rd_kafka_ListOffsets").CreateDelegate(typeof(_ListOffsets_delegate)); + _ListOffsets_result_infos = (_ListOffsets_result_infos_delegate)methods.Single(m => m.Name == "rd_kafka_ListOffsets_result_infos").CreateDelegate(typeof(_ListOffsets_result_infos_delegate)); + _ListOffsetsResultInfo_timestamp = (_ListOffsetsResultInfo_timestamp_delegate)methods.Single(m => m.Name == "rd_kafka_ListOffsetsResultInfo_timestamp").CreateDelegate(typeof(_ListOffsetsResultInfo_timestamp_delegate)); + _ListOffsetsResultInfo_topic_partition = (_ListOffsetsResultInfo_topic_partition_delegate)methods.Single(m => m.Name == "rd_kafka_ListOffsetsResultInfo_topic_partition").CreateDelegate(typeof(_ListOffsetsResultInfo_topic_partition_delegate)); + + _DescribeTopics = (_DescribeTopics_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeTopics").CreateDelegate(typeof(_DescribeTopics_delegate)); + _DescribeTopics_result_topics = (_DescribeTopics_result_topics_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeTopics_result_topics").CreateDelegate(typeof(_DescribeTopics_result_topics_delegate)); + _TopicCollection_of_topic_names = (_TopicCollection_of_topic_names_delegate)methods.Single(m => m.Name == "rd_kafka_TopicCollection_of_topic_names").CreateDelegate(typeof(_TopicCollection_of_topic_names_delegate)); + _TopicCollection_destroy = (_TopicCollection_destroy_delegate)methods.Single(m => m.Name == "rd_kafka_TopicCollection_destroy").CreateDelegate(typeof(_TopicCollection_destroy_delegate)); + _TopicDescription_error = (_TopicDescription_error_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_error").CreateDelegate(typeof(_TopicDescription_error_delegate)); + _TopicDescription_name = (_TopicDescription_name_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_name").CreateDelegate(typeof(_TopicDescription_name_delegate)); + _TopicDescription_topic_id = (_TopicDescription_topic_id_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_topic_id").CreateDelegate(typeof(_TopicDescription_topic_id_delegate)); + _TopicDescription_partitions = (_TopicDescription_partitions_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_partitions").CreateDelegate(typeof(_TopicDescription_partitions_delegate)); + _TopicDescription_is_internal = (_TopicDescription_is_internal_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_is_internal").CreateDelegate(typeof(_TopicDescription_is_internal_delegate)); + _TopicDescription_authorized_operations = (_TopicDescription_authorized_operations_delegate)methods.Single(m => m.Name == "rd_kafka_TopicDescription_authorized_operations").CreateDelegate(typeof(_TopicDescription_authorized_operations_delegate)); + _TopicPartitionInfo_isr = (_TopicPartitionInfo_isr_delegate)methods.Single(m => m.Name == "rd_kafka_TopicPartitionInfo_isr").CreateDelegate(typeof(_TopicPartitionInfo_isr_delegate)); + _TopicPartitionInfo_leader = 
(_TopicPartitionInfo_leader_delegate)methods.Single(m => m.Name == "rd_kafka_TopicPartitionInfo_leader").CreateDelegate(typeof(_TopicPartitionInfo_leader_delegate)); + _TopicPartitionInfo_partition = (_TopicPartitionInfo_partition_delegate)methods.Single(m => m.Name == "rd_kafka_TopicPartitionInfo_partition").CreateDelegate(typeof(_TopicPartitionInfo_partition_delegate)); + _TopicPartitionInfo_replicas = (_TopicPartitionInfo_replicas_delegate)methods.Single(m => m.Name == "rd_kafka_TopicPartitionInfo_replicas").CreateDelegate(typeof(_TopicPartitionInfo_replicas_delegate)); + + _DescribeCluster = (_DescribeCluster_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster").CreateDelegate(typeof(_DescribeCluster_delegate)); + _DescribeCluster_result_nodes = (_DescribeCluster_result_nodes_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster_result_nodes").CreateDelegate(typeof(_DescribeCluster_result_nodes_delegate)); + _DescribeCluster_result_authorized_operations = (_DescribeCluster_result_authorized_operations_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster_result_authorized_operations").CreateDelegate(typeof(_DescribeCluster_result_authorized_operations_delegate)); + _DescribeCluster_result_controller = (_DescribeCluster_result_controller_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster_result_controller").CreateDelegate(typeof(_DescribeCluster_result_controller_delegate)); + _DescribeCluster_result_cluster_id = (_DescribeCluster_result_cluster_id_delegate)methods.Single(m => m.Name == "rd_kafka_DescribeCluster_result_cluster_id").CreateDelegate(typeof(_DescribeCluster_result_cluster_id_delegate)); _topic_result_error = (Func)methods.Single(m => m.Name == "rd_kafka_topic_result_error").CreateDelegate(typeof(Func)); _topic_result_error_string = (Func)methods.Single(m => m.Name == "rd_kafka_topic_result_error_string").CreateDelegate(typeof(Func)); @@ -1166,7 +1166,7 @@ internal static ErrorCode resume_partitions(IntPtr rk, IntPtr partitions) private static Func _seek; internal static ErrorCode seek(IntPtr rkt, int partition, long offset, IntPtr timeout_ms) => _seek(rkt, partition, offset, timeout_ms); - + private static Func _seek_partitions; internal static IntPtr seek_partitions(IntPtr rkt, IntPtr partitions, IntPtr timeout_ms) => _seek_partitions(rkt, partitions, timeout_ms); @@ -1197,18 +1197,18 @@ internal static unsafe ErrorCode produceva( IntPtr msg_opaque) { IntPtr topicStrPtr = Marshal.StringToHGlobalAnsi(topic); - + try { rd_kafka_vu* vus = stackalloc rd_kafka_vu[] { - new rd_kafka_vu() {vt = rd_kafka_vtype.Topic, data = new vu_data() {topic = topicStrPtr}}, - new rd_kafka_vu() {vt = rd_kafka_vtype.Partition, data = new vu_data() {partition = partition}}, - new rd_kafka_vu() {vt = rd_kafka_vtype.MsgFlags, data = new vu_data() {msgflags = msgflags}}, - new rd_kafka_vu() {vt = rd_kafka_vtype.Value, data = new vu_data() {val = new ptr_and_size() {ptr = val, size = len}}}, - new rd_kafka_vu() {vt = rd_kafka_vtype.Key, data = new vu_data() {key = new ptr_and_size() {ptr = key, size = keylen}}}, - new rd_kafka_vu() {vt = rd_kafka_vtype.Timestamp, data = new vu_data() {timestamp = timestamp}}, - new rd_kafka_vu() {vt = rd_kafka_vtype.Headers, data = new vu_data() {headers = headers}}, - new rd_kafka_vu() {vt = rd_kafka_vtype.Opaque, data = new vu_data() {opaque = msg_opaque}}, + new rd_kafka_vu() { vt = rd_kafka_vtype.Topic, data = new vu_data() { topic = topicStrPtr } }, + new rd_kafka_vu() { vt = rd_kafka_vtype.Partition, data = new 
vu_data() { partition = partition } }, + new rd_kafka_vu() { vt = rd_kafka_vtype.MsgFlags, data = new vu_data() { msgflags = msgflags } }, + new rd_kafka_vu() { vt = rd_kafka_vtype.Value, data = new vu_data() { val = new ptr_and_size() { ptr = val, size = len } } }, + new rd_kafka_vu() { vt = rd_kafka_vtype.Key, data = new vu_data() { key = new ptr_and_size() { ptr = key, size = keylen } } }, + new rd_kafka_vu() { vt = rd_kafka_vtype.Timestamp, data = new vu_data() { timestamp = timestamp } }, + new rd_kafka_vu() { vt = rd_kafka_vtype.Headers, data = new vu_data() { headers = headers } }, + new rd_kafka_vu() { vt = rd_kafka_vtype.Opaque, data = new vu_data() { opaque = msg_opaque } }, }; IntPtr result = _produceva(rk, vus, new IntPtr(8)); @@ -1481,7 +1481,7 @@ internal static IntPtr ConfigEntry_name( IntPtr entry) => _ConfigEntry_name(entry); private static Func _ConfigEntry_value; - internal static IntPtr ConfigEntry_value ( + internal static IntPtr ConfigEntry_value( IntPtr entry) => _ConfigEntry_value(entry); private static Func _ConfigEntry_source; @@ -1501,7 +1501,7 @@ internal static IntPtr ConfigEntry_is_sensitive( IntPtr entry) => _ConfigEntry_is_sensitive(entry); private static Func _ConfigEntry_is_synonym; - internal static IntPtr ConfigEntry_is_synonym ( + internal static IntPtr ConfigEntry_is_synonym( IntPtr entry) => _ConfigEntry_is_synonym(entry); private delegate IntPtr _ConfigEntry_synonyms_delegate(IntPtr entry, out UIntPtr cntp); @@ -1543,7 +1543,7 @@ internal static ErrorCode ConfigResource_delete_config( private static Func _ConfigResource_add_incremental_config; internal static IntPtr ConfigResource_add_incremental_config( IntPtr config, - string name, + string name, AlterConfigOpType optype, string value) => _ConfigResource_add_incremental_config(config, name, optype, value); @@ -1572,7 +1572,7 @@ internal static IntPtr ConfigResource_error_string( private static Action _AlterConfigs; - internal static void AlterConfigs ( + internal static void AlterConfigs( IntPtr rk, IntPtr[] configs, UIntPtr config_cnt, @@ -1584,9 +1584,9 @@ internal static void AlterConfigs ( internal static IntPtr AlterConfigs_result_resources( IntPtr result, out UIntPtr cntp) => _AlterConfigs_result_resources(result, out cntp); - + private static Action _IncrementalAlterConfigs; - internal static void IncrementalAlterConfigs ( + internal static void IncrementalAlterConfigs( IntPtr rk, IntPtr[] configs, UIntPtr config_cnt, @@ -1600,7 +1600,7 @@ internal static IntPtr IncrementalAlterConfigs_result_resources( out UIntPtr cntp) => _IncrementalAlterConfigs_result_resources(result, out cntp); private static Action _DescribeConfigs; - internal static void DescribeConfigs ( + internal static void DescribeConfigs( IntPtr rk, IntPtr[] configs, UIntPtr config_cnt, @@ -1663,7 +1663,7 @@ internal static IntPtr DeleteConsumerGroupOffsets_result_groups( // ACLs // private delegate IntPtr _AclBinding_new_delegate(ResourceType restype, string name, ResourcePatternType resource_pattern_type, string principal, string host, AclOperation operation, AclPermissionType permission_type, StringBuilder errstr, UIntPtr errstr_size); - private static _AclBinding_new_delegate _AclBinding_new; + private static _AclBinding_new_delegate _AclBinding_new; internal static IntPtr AclBinding_new( ResourceType restype, string name, @@ -1677,7 +1677,7 @@ UIntPtr errstr_size ) => _AclBinding_new(restype, name, resource_pattern_type, principal, host, operation, permission_type, errstr, errstr_size); private delegate IntPtr 
_AclBindingFilter_new_delegate(ResourceType restype, string name, ResourcePatternType resource_pattern_type, string principal, string host, AclOperation operation, AclPermissionType permission_type, StringBuilder errstr, UIntPtr errstr_size); - private static _AclBindingFilter_new_delegate _AclBindingFilter_new; + private static _AclBindingFilter_new_delegate _AclBindingFilter_new; internal static IntPtr AclBindingFilter_new( ResourceType restype, string name, @@ -1863,123 +1863,124 @@ internal static IntPtr ListConsumerGroupOffsets_result_groups( out UIntPtr groupsTopicPartitionsCount ) => _ListConsumerGroupOffsets_result_groups(resultResponse, out groupsTopicPartitionsCount); - private delegate void _ListConsumerGroups_delegate(IntPtr handle, IntPtr optionsPtr, IntPtr resultQueuePtr); - private static _ListConsumerGroups_delegate _ListConsumerGroups; - internal static void ListConsumerGroups(IntPtr handle, IntPtr optionsPtr, IntPtr resultQueuePtr) - => _ListConsumerGroups(handle, optionsPtr, resultQueuePtr); - - private delegate IntPtr _ConsumerGroupListing_group_id_delegate(IntPtr grplist); - private static _ConsumerGroupListing_group_id_delegate _ConsumerGroupListing_group_id; - internal static IntPtr ConsumerGroupListing_group_id(IntPtr grplist) - => _ConsumerGroupListing_group_id(grplist); - - private delegate IntPtr _ConsumerGroupListing_is_simple_consumer_group_delegate(IntPtr grplist); - private static _ConsumerGroupListing_is_simple_consumer_group_delegate _ConsumerGroupListing_is_simple_consumer_group; - internal static IntPtr ConsumerGroupListing_is_simple_consumer_group(IntPtr grplist) - => _ConsumerGroupListing_is_simple_consumer_group(grplist); - - private delegate ConsumerGroupState _ConsumerGroupListing_state_delegate(IntPtr grplist); - private static _ConsumerGroupListing_state_delegate _ConsumerGroupListing_state; - internal static ConsumerGroupState ConsumerGroupListing_state(IntPtr grplist) - => _ConsumerGroupListing_state(grplist); - - private delegate IntPtr _ListConsumerGroups_result_valid_delegate(IntPtr result, out UIntPtr cntp); - private static _ListConsumerGroups_result_valid_delegate _ListConsumerGroups_result_valid; - internal static IntPtr ListConsumerGroups_result_valid(IntPtr result, out UIntPtr cntp) - => _ListConsumerGroups_result_valid(result, out cntp); - - private delegate IntPtr _ListConsumerGroups_result_errors_delegate(IntPtr result, out UIntPtr cntp); - private static _ListConsumerGroups_result_errors_delegate _ListConsumerGroups_result_errors; - internal static IntPtr ListConsumerGroups_result_errors(IntPtr result, out UIntPtr cntp) - => _ListConsumerGroups_result_errors(result, out cntp); - - private delegate void _DescribeConsumerGroups_delegate( - IntPtr handle, [MarshalAs(UnmanagedType.LPArray)] string[] groups, UIntPtr groupsCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); - private static _DescribeConsumerGroups_delegate _DescribeConsumerGroups; - internal static void DescribeConsumerGroups( - IntPtr handle, [MarshalAs(UnmanagedType.LPArray)] string[] groups, UIntPtr groupsCnt, IntPtr optionsPtr, IntPtr resultQueuePtr) - => _DescribeConsumerGroups(handle, groups, groupsCnt, optionsPtr, resultQueuePtr); - - private delegate IntPtr _DescribeConsumerGroups_result_groups_delegate(IntPtr result, out UIntPtr cntp); - private static _DescribeConsumerGroups_result_groups_delegate _DescribeConsumerGroups_result_groups; - internal static IntPtr DescribeConsumerGroups_result_groups(IntPtr result, out UIntPtr cntp) - => 
_DescribeConsumerGroups_result_groups(result, out cntp); - - private delegate IntPtr _ConsumerGroupDescription_group_id_delegate(IntPtr grpdesc); - private static _ConsumerGroupDescription_group_id_delegate _ConsumerGroupDescription_group_id; - internal static IntPtr ConsumerGroupDescription_group_id(IntPtr grpdesc) - => _ConsumerGroupDescription_group_id(grpdesc); - - private delegate IntPtr _ConsumerGroupDescription_error_delegate(IntPtr grpdesc); - private static _ConsumerGroupDescription_error_delegate _ConsumerGroupDescription_error; - internal static IntPtr ConsumerGroupDescription_error(IntPtr grpdesc) - => _ConsumerGroupDescription_error(grpdesc); - - private delegate int _ConsumerGroupDescription_is_simple_consumer_group_delegate(IntPtr grpdesc); - private static _ConsumerGroupDescription_is_simple_consumer_group_delegate _ConsumerGroupDescription_is_simple_consumer_group; - internal static int ConsumerGroupDescription_is_simple_consumer_group(IntPtr grpdesc) - => _ConsumerGroupDescription_is_simple_consumer_group(grpdesc); - - private delegate IntPtr _ConsumerGroupDescription_partition_assignor_delegate(IntPtr grpdesc); - private static _ConsumerGroupDescription_partition_assignor_delegate _ConsumerGroupDescription_partition_assignor; - internal static IntPtr ConsumerGroupDescription_partition_assignor(IntPtr grpdesc) - => _ConsumerGroupDescription_partition_assignor(grpdesc); - - private delegate ConsumerGroupState _ConsumerGroupDescription_state_delegate(IntPtr grpdesc); - private static _ConsumerGroupDescription_state_delegate _ConsumerGroupDescription_state; - internal static ConsumerGroupState ConsumerGroupDescription_state(IntPtr grpdesc) { + private delegate void _ListConsumerGroups_delegate(IntPtr handle, IntPtr optionsPtr, IntPtr resultQueuePtr); + private static _ListConsumerGroups_delegate _ListConsumerGroups; + internal static void ListConsumerGroups(IntPtr handle, IntPtr optionsPtr, IntPtr resultQueuePtr) + => _ListConsumerGroups(handle, optionsPtr, resultQueuePtr); + + private delegate IntPtr _ConsumerGroupListing_group_id_delegate(IntPtr grplist); + private static _ConsumerGroupListing_group_id_delegate _ConsumerGroupListing_group_id; + internal static IntPtr ConsumerGroupListing_group_id(IntPtr grplist) + => _ConsumerGroupListing_group_id(grplist); + + private delegate IntPtr _ConsumerGroupListing_is_simple_consumer_group_delegate(IntPtr grplist); + private static _ConsumerGroupListing_is_simple_consumer_group_delegate _ConsumerGroupListing_is_simple_consumer_group; + internal static IntPtr ConsumerGroupListing_is_simple_consumer_group(IntPtr grplist) + => _ConsumerGroupListing_is_simple_consumer_group(grplist); + + private delegate ConsumerGroupState _ConsumerGroupListing_state_delegate(IntPtr grplist); + private static _ConsumerGroupListing_state_delegate _ConsumerGroupListing_state; + internal static ConsumerGroupState ConsumerGroupListing_state(IntPtr grplist) + => _ConsumerGroupListing_state(grplist); + + private delegate IntPtr _ListConsumerGroups_result_valid_delegate(IntPtr result, out UIntPtr cntp); + private static _ListConsumerGroups_result_valid_delegate _ListConsumerGroups_result_valid; + internal static IntPtr ListConsumerGroups_result_valid(IntPtr result, out UIntPtr cntp) + => _ListConsumerGroups_result_valid(result, out cntp); + + private delegate IntPtr _ListConsumerGroups_result_errors_delegate(IntPtr result, out UIntPtr cntp); + private static _ListConsumerGroups_result_errors_delegate _ListConsumerGroups_result_errors; + internal static IntPtr 
ListConsumerGroups_result_errors(IntPtr result, out UIntPtr cntp) + => _ListConsumerGroups_result_errors(result, out cntp); + + private delegate void _DescribeConsumerGroups_delegate( + IntPtr handle, [MarshalAs(UnmanagedType.LPArray)] string[] groups, UIntPtr groupsCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); + private static _DescribeConsumerGroups_delegate _DescribeConsumerGroups; + internal static void DescribeConsumerGroups( + IntPtr handle, [MarshalAs(UnmanagedType.LPArray)] string[] groups, UIntPtr groupsCnt, IntPtr optionsPtr, IntPtr resultQueuePtr) + => _DescribeConsumerGroups(handle, groups, groupsCnt, optionsPtr, resultQueuePtr); + + private delegate IntPtr _DescribeConsumerGroups_result_groups_delegate(IntPtr result, out UIntPtr cntp); + private static _DescribeConsumerGroups_result_groups_delegate _DescribeConsumerGroups_result_groups; + internal static IntPtr DescribeConsumerGroups_result_groups(IntPtr result, out UIntPtr cntp) + => _DescribeConsumerGroups_result_groups(result, out cntp); + + private delegate IntPtr _ConsumerGroupDescription_group_id_delegate(IntPtr grpdesc); + private static _ConsumerGroupDescription_group_id_delegate _ConsumerGroupDescription_group_id; + internal static IntPtr ConsumerGroupDescription_group_id(IntPtr grpdesc) + => _ConsumerGroupDescription_group_id(grpdesc); + + private delegate IntPtr _ConsumerGroupDescription_error_delegate(IntPtr grpdesc); + private static _ConsumerGroupDescription_error_delegate _ConsumerGroupDescription_error; + internal static IntPtr ConsumerGroupDescription_error(IntPtr grpdesc) + => _ConsumerGroupDescription_error(grpdesc); + + private delegate int _ConsumerGroupDescription_is_simple_consumer_group_delegate(IntPtr grpdesc); + private static _ConsumerGroupDescription_is_simple_consumer_group_delegate _ConsumerGroupDescription_is_simple_consumer_group; + internal static int ConsumerGroupDescription_is_simple_consumer_group(IntPtr grpdesc) + => _ConsumerGroupDescription_is_simple_consumer_group(grpdesc); + + private delegate IntPtr _ConsumerGroupDescription_partition_assignor_delegate(IntPtr grpdesc); + private static _ConsumerGroupDescription_partition_assignor_delegate _ConsumerGroupDescription_partition_assignor; + internal static IntPtr ConsumerGroupDescription_partition_assignor(IntPtr grpdesc) + => _ConsumerGroupDescription_partition_assignor(grpdesc); + + private delegate ConsumerGroupState _ConsumerGroupDescription_state_delegate(IntPtr grpdesc); + private static _ConsumerGroupDescription_state_delegate _ConsumerGroupDescription_state; + internal static ConsumerGroupState ConsumerGroupDescription_state(IntPtr grpdesc) + { return _ConsumerGroupDescription_state(grpdesc); - } - - private delegate IntPtr _ConsumerGroupDescription_coordinator_delegate(IntPtr grpdesc); - private static _ConsumerGroupDescription_coordinator_delegate _ConsumerGroupDescription_coordinator; - internal static IntPtr ConsumerGroupDescription_coordinator(IntPtr grpdesc) - => _ConsumerGroupDescription_coordinator(grpdesc); - - private delegate IntPtr _ConsumerGroupDescription_member_count_delegate(IntPtr grpdesc); - private static _ConsumerGroupDescription_member_count_delegate _ConsumerGroupDescription_member_count; - internal static IntPtr ConsumerGroupDescription_member_count(IntPtr grpdesc) - => _ConsumerGroupDescription_member_count(grpdesc); - - private delegate IntPtr _ConsumerGroupDescription_authorized_operations_delegate(IntPtr grpdesc, out UIntPtr cntp); - private static _ConsumerGroupDescription_authorized_operations_delegate 
_ConsumerGroupDescription_authorized_operations; - internal static IntPtr ConsumerGroupDescription_authorized_operations(IntPtr grpdesc, out UIntPtr cntp) - => _ConsumerGroupDescription_authorized_operations(grpdesc, out cntp); - - private delegate IntPtr _ConsumerGroupDescription_member_delegate(IntPtr grpdesc, IntPtr idx); - private static _ConsumerGroupDescription_member_delegate _ConsumerGroupDescription_member; - internal static IntPtr ConsumerGroupDescription_member(IntPtr grpdesc, IntPtr idx) - => _ConsumerGroupDescription_member(grpdesc, idx); - - private delegate IntPtr _MemberDescription_client_id_delegate(IntPtr member); - private static _MemberDescription_client_id_delegate _MemberDescription_client_id; - internal static IntPtr MemberDescription_client_id(IntPtr member) - => _MemberDescription_client_id(member); - - private delegate IntPtr _MemberDescription_group_instance_id_delegate(IntPtr member); - private static _MemberDescription_group_instance_id_delegate _MemberDescription_group_instance_id; - internal static IntPtr MemberDescription_group_instance_id(IntPtr member) - => _MemberDescription_group_instance_id(member); - - private delegate IntPtr _MemberDescription_consumer_id_delegate(IntPtr member); - private static _MemberDescription_consumer_id_delegate _MemberDescription_consumer_id; - internal static IntPtr MemberDescription_consumer_id(IntPtr member) - => _MemberDescription_consumer_id(member); - - private delegate IntPtr _MemberDescription_host_delegate(IntPtr member); - private static _MemberDescription_host_delegate _MemberDescription_host; - internal static IntPtr MemberDescription_host(IntPtr member) - => _MemberDescription_host(member); - - private delegate IntPtr _MemberDescription_assignment_delegate(IntPtr member); - private static _MemberDescription_assignment_delegate _MemberDescription_assignment; - internal static IntPtr MemberDescription_assignment(IntPtr member) - => _MemberDescription_assignment(member); - - private delegate IntPtr _MemberAssignment_partitions_delegate(IntPtr assignment); - private static _MemberAssignment_partitions_delegate _MemberAssignment_partitions; - internal static IntPtr MemberAssignment_topic_partitions(IntPtr assignment) - => _MemberAssignment_partitions(assignment); + } + + private delegate IntPtr _ConsumerGroupDescription_coordinator_delegate(IntPtr grpdesc); + private static _ConsumerGroupDescription_coordinator_delegate _ConsumerGroupDescription_coordinator; + internal static IntPtr ConsumerGroupDescription_coordinator(IntPtr grpdesc) + => _ConsumerGroupDescription_coordinator(grpdesc); + + private delegate IntPtr _ConsumerGroupDescription_member_count_delegate(IntPtr grpdesc); + private static _ConsumerGroupDescription_member_count_delegate _ConsumerGroupDescription_member_count; + internal static IntPtr ConsumerGroupDescription_member_count(IntPtr grpdesc) + => _ConsumerGroupDescription_member_count(grpdesc); + + private delegate IntPtr _ConsumerGroupDescription_authorized_operations_delegate(IntPtr grpdesc, out UIntPtr cntp); + private static _ConsumerGroupDescription_authorized_operations_delegate _ConsumerGroupDescription_authorized_operations; + internal static IntPtr ConsumerGroupDescription_authorized_operations(IntPtr grpdesc, out UIntPtr cntp) + => _ConsumerGroupDescription_authorized_operations(grpdesc, out cntp); + + private delegate IntPtr _ConsumerGroupDescription_member_delegate(IntPtr grpdesc, IntPtr idx); + private static _ConsumerGroupDescription_member_delegate _ConsumerGroupDescription_member; + 
internal static IntPtr ConsumerGroupDescription_member(IntPtr grpdesc, IntPtr idx) + => _ConsumerGroupDescription_member(grpdesc, idx); + + private delegate IntPtr _MemberDescription_client_id_delegate(IntPtr member); + private static _MemberDescription_client_id_delegate _MemberDescription_client_id; + internal static IntPtr MemberDescription_client_id(IntPtr member) + => _MemberDescription_client_id(member); + + private delegate IntPtr _MemberDescription_group_instance_id_delegate(IntPtr member); + private static _MemberDescription_group_instance_id_delegate _MemberDescription_group_instance_id; + internal static IntPtr MemberDescription_group_instance_id(IntPtr member) + => _MemberDescription_group_instance_id(member); + + private delegate IntPtr _MemberDescription_consumer_id_delegate(IntPtr member); + private static _MemberDescription_consumer_id_delegate _MemberDescription_consumer_id; + internal static IntPtr MemberDescription_consumer_id(IntPtr member) + => _MemberDescription_consumer_id(member); + + private delegate IntPtr _MemberDescription_host_delegate(IntPtr member); + private static _MemberDescription_host_delegate _MemberDescription_host; + internal static IntPtr MemberDescription_host(IntPtr member) + => _MemberDescription_host(member); + + private delegate IntPtr _MemberDescription_assignment_delegate(IntPtr member); + private static _MemberDescription_assignment_delegate _MemberDescription_assignment; + internal static IntPtr MemberDescription_assignment(IntPtr member) + => _MemberDescription_assignment(member); + + private delegate IntPtr _MemberAssignment_partitions_delegate(IntPtr assignment); + private static _MemberAssignment_partitions_delegate _MemberAssignment_partitions; + internal static IntPtr MemberAssignment_topic_partitions(IntPtr assignment) + => _MemberAssignment_partitions(assignment); private delegate IntPtr _Node_id_delegate(IntPtr node); private static _Node_id_delegate _Node_id; @@ -2000,23 +2001,23 @@ internal static IntPtr MemberAssignment_topic_partitions(IntPtr assignment) private delegate void _ListOffsets_delegate(IntPtr handle, IntPtr topic_partition_list, IntPtr options, IntPtr resultQueuePtr); private static _ListOffsets_delegate _ListOffsets; internal static void ListOffsets(IntPtr handle, IntPtr topic_partition_list, IntPtr options, IntPtr resultQueuePtr) - => _ListOffsets(handle,topic_partition_list,options, resultQueuePtr); - - private delegate IntPtr _ListOffsets_result_infos_delegate(IntPtr resultPtr,out UIntPtr cntp); + => _ListOffsets(handle, topic_partition_list, options, resultQueuePtr); + + private delegate IntPtr _ListOffsets_result_infos_delegate(IntPtr resultPtr, out UIntPtr cntp); private static _ListOffsets_result_infos_delegate _ListOffsets_result_infos; - internal static IntPtr ListOffsets_result_infos(IntPtr resultPtr,out UIntPtr cntp) + internal static IntPtr ListOffsets_result_infos(IntPtr resultPtr, out UIntPtr cntp) => _ListOffsets_result_infos(resultPtr, out cntp); - + private delegate long _ListOffsetsResultInfo_timestamp_delegate(IntPtr element); private static _ListOffsetsResultInfo_timestamp_delegate _ListOffsetsResultInfo_timestamp; internal static long ListOffsetsResultInfo_timestamp(IntPtr element) => _ListOffsetsResultInfo_timestamp(element); - + private delegate IntPtr _ListOffsetsResultInfo_topic_partition_delegate(IntPtr element); private static _ListOffsetsResultInfo_topic_partition_delegate _ListOffsetsResultInfo_topic_partition; internal static IntPtr ListOffsetsResultInfo_topic_partition(IntPtr element) 
=> _ListOffsetsResultInfo_topic_partition(element); - + private static Func _topic_result_error; internal static ErrorCode topic_result_error(IntPtr topicres) => _topic_result_error(topicres); @@ -2034,18 +2035,18 @@ internal static IntPtr ListOffsetsResultInfo_topic_partition(IntPtr element) private static Func _group_result_partitions; internal static IntPtr group_result_partitions(IntPtr groupres) => _group_result_partitions(groupres); - + // // User SCRAM credentials // - + private delegate void _DescribeUserScramCredentials_delegate( IntPtr handle, [MarshalAs(UnmanagedType.LPArray)] string[] users, UIntPtr usersCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); private static _DescribeUserScramCredentials_delegate _DescribeUserScramCredentials; internal static void DescribeUserScramCredentials( IntPtr handle, [MarshalAs(UnmanagedType.LPArray)] string[] users, UIntPtr usersCnt, IntPtr optionsPtr, IntPtr resultQueuePtr) => _DescribeUserScramCredentials(handle, users, usersCnt, optionsPtr, resultQueuePtr); - + private delegate IntPtr _DescribeUserScramCredentials_result_descriptions_delegate( IntPtr event_result, out UIntPtr cntp); private static _DescribeUserScramCredentials_result_descriptions_delegate _DescribeUserScramCredentials_result_descriptions; @@ -2079,7 +2080,7 @@ private delegate IntPtr _UserScramCredentialsDescription_scramcredentialinfo_del private static _UserScramCredentialsDescription_scramcredentialinfo_delegate _UserScramCredentialsDescription_scramcredentialinfo; internal static IntPtr UserScramCredentialsDescription_scramcredentialinfo( IntPtr description, int i) - => _UserScramCredentialsDescription_scramcredentialinfo(description,i); + => _UserScramCredentialsDescription_scramcredentialinfo(description, i); private delegate ScramMechanism _ScramCredentialInfo_mechanism_delegate( IntPtr scramcredentialinfo); @@ -2109,8 +2110,8 @@ private delegate IntPtr _UserScramCredentialDeletion_new_delegate( string user, ScramMechanism mechanism); private static _UserScramCredentialDeletion_new_delegate _UserScramCredentialDeletion_new; internal static IntPtr UserScramCredentialDeletion_new( - string user,ScramMechanism mechanism) - => _UserScramCredentialDeletion_new(user,mechanism); + string user, ScramMechanism mechanism) + => _UserScramCredentialDeletion_new(user, mechanism); private delegate void _UserScramCredentialAlteration_destroy_delegate( IntPtr alteration); @@ -2118,7 +2119,7 @@ private delegate void _UserScramCredentialAlteration_destroy_delegate( internal static void UserScramCredentialAlteration_destroy( IntPtr alteration) => _UserScramCredentialAlteration_destroy(alteration); - + private delegate ErrorCode _AlterUserScramCredentials_delegate( IntPtr handle, IntPtr[] alterations, UIntPtr alterationsCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); @@ -2170,88 +2171,88 @@ private delegate void _TopicCollection_destroy_delegate( internal static void TopicCollection_destroy(IntPtr topic_collection) => _TopicCollection_destroy(topic_collection); - private delegate IntPtr _DescribeTopics_result_topics_delegate(IntPtr result, out UIntPtr cntp); - private static _DescribeTopics_result_topics_delegate _DescribeTopics_result_topics; - internal static IntPtr DescribeTopics_result_topics(IntPtr result, out UIntPtr cntp) - => _DescribeTopics_result_topics(result, out cntp); - - private delegate IntPtr _TopicDescription_error_delegate(IntPtr topicdesc); - private static _TopicDescription_error_delegate _TopicDescription_error; - internal static IntPtr TopicDescription_error(IntPtr 
topicdesc) - => _TopicDescription_error(topicdesc); - - private delegate IntPtr _TopicDescription_name_delegate(IntPtr topicdesc); - private static _TopicDescription_name_delegate _TopicDescription_name; - internal static IntPtr TopicDescription_name(IntPtr topicdesc) - => _TopicDescription_name(topicdesc); - - - private delegate IntPtr _TopicDescription_topic_id_delegate(IntPtr topicdesc); - private static _TopicDescription_topic_id_delegate _TopicDescription_topic_id; - internal static IntPtr TopicDescription_topic_id(IntPtr topicdesc) - => _TopicDescription_topic_id(topicdesc); - - private delegate IntPtr _TopicDescription_partitions_delegate(IntPtr topicdesc, out UIntPtr cntp); - private static _TopicDescription_partitions_delegate _TopicDescription_partitions; - internal static IntPtr TopicDescription_partitions(IntPtr topicdesc, out UIntPtr cntp) - => _TopicDescription_partitions(topicdesc, out cntp); - - private delegate IntPtr _TopicDescription_is_internal_delegate(IntPtr topicdesc); - private static _TopicDescription_is_internal_delegate _TopicDescription_is_internal; - internal static IntPtr TopicDescription_is_internal(IntPtr topicdesc) - => _TopicDescription_is_internal(topicdesc); - - private delegate IntPtr _TopicDescription_authorized_operations_delegate(IntPtr topicdesc, out UIntPtr cntp); - private static _TopicDescription_authorized_operations_delegate _TopicDescription_authorized_operations; - internal static IntPtr TopicDescription_authorized_operations(IntPtr topicdesc, out UIntPtr cntp) - => _TopicDescription_authorized_operations(topicdesc, out cntp); - - private delegate IntPtr _TopicPartitionInfo_isr_delegate(IntPtr topic_partition_info, out UIntPtr cntp); - private static _TopicPartitionInfo_isr_delegate _TopicPartitionInfo_isr; - internal static IntPtr TopicPartitionInfo_isr(IntPtr topic_partition_info, out UIntPtr cntp) - => _TopicPartitionInfo_isr(topic_partition_info, out cntp); - - private delegate IntPtr _TopicPartitionInfo_leader_delegate(IntPtr topic_partition_info); - private static _TopicPartitionInfo_leader_delegate _TopicPartitionInfo_leader; - internal static IntPtr TopicPartitionInfo_leader(IntPtr topic_partition_info) - => _TopicPartitionInfo_leader(topic_partition_info); - - private delegate int _TopicPartitionInfo_partition_delegate(IntPtr topic_partition_info); - private static _TopicPartitionInfo_partition_delegate _TopicPartitionInfo_partition; - internal static int TopicPartitionInfo_partition(IntPtr topic_partition_info) - => _TopicPartitionInfo_partition(topic_partition_info); - - private delegate IntPtr _TopicPartitionInfo_replicas_delegate(IntPtr topic_partition_info, out UIntPtr cntp); - private static _TopicPartitionInfo_replicas_delegate _TopicPartitionInfo_replicas; - internal static IntPtr TopicPartitionInfo_replicas(IntPtr topic_partition_info, out UIntPtr cntp) - => _TopicPartitionInfo_replicas(topic_partition_info, out cntp); - - private delegate void _DescribeCluster_delegate( - IntPtr handle, IntPtr optionsPtr, IntPtr resultQueuePtr); - private static _DescribeCluster_delegate _DescribeCluster; - internal static void DescribeCluster( - IntPtr handle, IntPtr optionsPtr, IntPtr resultQueuePtr) - => _DescribeCluster(handle, optionsPtr, resultQueuePtr); - - private delegate IntPtr _DescribeCluster_result_nodes_delegate(IntPtr result, out UIntPtr cntp); - private static _DescribeCluster_result_nodes_delegate _DescribeCluster_result_nodes; - internal static IntPtr DescribeCluster_result_nodes(IntPtr result, out UIntPtr cntp) - => 
_DescribeCluster_result_nodes(result, out cntp); - - private delegate IntPtr _DescribeCluster_result_authorized_operations_delegate(IntPtr result, out UIntPtr cntp); - private static _DescribeCluster_result_authorized_operations_delegate _DescribeCluster_result_authorized_operations; - internal static IntPtr DescribeCluster_result_authorized_operations(IntPtr result, out UIntPtr cntp) - => _DescribeCluster_result_authorized_operations(result, out cntp); - - private delegate IntPtr _DescribeCluster_result_controller_delegate(IntPtr result); - private static _DescribeCluster_result_controller_delegate _DescribeCluster_result_controller; - internal static IntPtr DescribeCluster_result_controller(IntPtr result) - => _DescribeCluster_result_controller(result); - - private delegate IntPtr _DescribeCluster_result_cluster_id_delegate(IntPtr result); - private static _DescribeCluster_result_cluster_id_delegate _DescribeCluster_result_cluster_id; - internal static IntPtr DescribeCluster_result_cluster_id(IntPtr result) - => _DescribeCluster_result_cluster_id(result); + private delegate IntPtr _DescribeTopics_result_topics_delegate(IntPtr result, out UIntPtr cntp); + private static _DescribeTopics_result_topics_delegate _DescribeTopics_result_topics; + internal static IntPtr DescribeTopics_result_topics(IntPtr result, out UIntPtr cntp) + => _DescribeTopics_result_topics(result, out cntp); + + private delegate IntPtr _TopicDescription_error_delegate(IntPtr topicdesc); + private static _TopicDescription_error_delegate _TopicDescription_error; + internal static IntPtr TopicDescription_error(IntPtr topicdesc) + => _TopicDescription_error(topicdesc); + + private delegate IntPtr _TopicDescription_name_delegate(IntPtr topicdesc); + private static _TopicDescription_name_delegate _TopicDescription_name; + internal static IntPtr TopicDescription_name(IntPtr topicdesc) + => _TopicDescription_name(topicdesc); + + + private delegate IntPtr _TopicDescription_topic_id_delegate(IntPtr topicdesc); + private static _TopicDescription_topic_id_delegate _TopicDescription_topic_id; + internal static IntPtr TopicDescription_topic_id(IntPtr topicdesc) + => _TopicDescription_topic_id(topicdesc); + + private delegate IntPtr _TopicDescription_partitions_delegate(IntPtr topicdesc, out UIntPtr cntp); + private static _TopicDescription_partitions_delegate _TopicDescription_partitions; + internal static IntPtr TopicDescription_partitions(IntPtr topicdesc, out UIntPtr cntp) + => _TopicDescription_partitions(topicdesc, out cntp); + + private delegate IntPtr _TopicDescription_is_internal_delegate(IntPtr topicdesc); + private static _TopicDescription_is_internal_delegate _TopicDescription_is_internal; + internal static IntPtr TopicDescription_is_internal(IntPtr topicdesc) + => _TopicDescription_is_internal(topicdesc); + + private delegate IntPtr _TopicDescription_authorized_operations_delegate(IntPtr topicdesc, out UIntPtr cntp); + private static _TopicDescription_authorized_operations_delegate _TopicDescription_authorized_operations; + internal static IntPtr TopicDescription_authorized_operations(IntPtr topicdesc, out UIntPtr cntp) + => _TopicDescription_authorized_operations(topicdesc, out cntp); + + private delegate IntPtr _TopicPartitionInfo_isr_delegate(IntPtr topic_partition_info, out UIntPtr cntp); + private static _TopicPartitionInfo_isr_delegate _TopicPartitionInfo_isr; + internal static IntPtr TopicPartitionInfo_isr(IntPtr topic_partition_info, out UIntPtr cntp) + => _TopicPartitionInfo_isr(topic_partition_info, out cntp); + 
+ private delegate IntPtr _TopicPartitionInfo_leader_delegate(IntPtr topic_partition_info); + private static _TopicPartitionInfo_leader_delegate _TopicPartitionInfo_leader; + internal static IntPtr TopicPartitionInfo_leader(IntPtr topic_partition_info) + => _TopicPartitionInfo_leader(topic_partition_info); + + private delegate int _TopicPartitionInfo_partition_delegate(IntPtr topic_partition_info); + private static _TopicPartitionInfo_partition_delegate _TopicPartitionInfo_partition; + internal static int TopicPartitionInfo_partition(IntPtr topic_partition_info) + => _TopicPartitionInfo_partition(topic_partition_info); + + private delegate IntPtr _TopicPartitionInfo_replicas_delegate(IntPtr topic_partition_info, out UIntPtr cntp); + private static _TopicPartitionInfo_replicas_delegate _TopicPartitionInfo_replicas; + internal static IntPtr TopicPartitionInfo_replicas(IntPtr topic_partition_info, out UIntPtr cntp) + => _TopicPartitionInfo_replicas(topic_partition_info, out cntp); + + private delegate void _DescribeCluster_delegate( + IntPtr handle, IntPtr optionsPtr, IntPtr resultQueuePtr); + private static _DescribeCluster_delegate _DescribeCluster; + internal static void DescribeCluster( + IntPtr handle, IntPtr optionsPtr, IntPtr resultQueuePtr) + => _DescribeCluster(handle, optionsPtr, resultQueuePtr); + + private delegate IntPtr _DescribeCluster_result_nodes_delegate(IntPtr result, out UIntPtr cntp); + private static _DescribeCluster_result_nodes_delegate _DescribeCluster_result_nodes; + internal static IntPtr DescribeCluster_result_nodes(IntPtr result, out UIntPtr cntp) + => _DescribeCluster_result_nodes(result, out cntp); + + private delegate IntPtr _DescribeCluster_result_authorized_operations_delegate(IntPtr result, out UIntPtr cntp); + private static _DescribeCluster_result_authorized_operations_delegate _DescribeCluster_result_authorized_operations; + internal static IntPtr DescribeCluster_result_authorized_operations(IntPtr result, out UIntPtr cntp) + => _DescribeCluster_result_authorized_operations(result, out cntp); + + private delegate IntPtr _DescribeCluster_result_controller_delegate(IntPtr result); + private static _DescribeCluster_result_controller_delegate _DescribeCluster_result_controller; + internal static IntPtr DescribeCluster_result_controller(IntPtr result) + => _DescribeCluster_result_controller(result); + + private delegate IntPtr _DescribeCluster_result_cluster_id_delegate(IntPtr result); + private static _DescribeCluster_result_cluster_id_delegate _DescribeCluster_result_cluster_id; + internal static IntPtr DescribeCluster_result_cluster_id(IntPtr result) + => _DescribeCluster_result_cluster_id(result); // // Queues diff --git a/src/Confluent.Kafka/Impl/Metadata.cs b/src/Confluent.Kafka/Impl/Metadata.cs index 40958825a..ded1a811b 100644 --- a/src/Confluent.Kafka/Impl/Metadata.cs +++ b/src/Confluent.Kafka/Impl/Metadata.cs @@ -23,14 +23,16 @@ namespace Confluent.Kafka.Impl { [StructLayout(LayoutKind.Sequential)] - struct rd_kafka_metadata_broker { + struct rd_kafka_metadata_broker + { internal int id; internal string host; internal int port; } [StructLayout(LayoutKind.Sequential)] - struct rd_kafka_metadata_partition { + struct rd_kafka_metadata_partition + { internal int id; internal ErrorCode err; internal int leader; @@ -41,7 +43,8 @@ struct rd_kafka_metadata_partition { } [StructLayout(LayoutKind.Sequential)] - struct rd_kafka_metadata_topic { + struct rd_kafka_metadata_topic + { internal string topic; internal int partition_cnt; internal /* struct 
rd_kafka_metadata_partition * */ IntPtr partitions; @@ -49,7 +52,8 @@ struct rd_kafka_metadata_topic { } [StructLayout(LayoutKind.Sequential)] - struct rd_kafka_metadata { + struct rd_kafka_metadata + { internal int broker_cnt; internal /* struct rd_kafka_metadata_broker * */ IntPtr brokers; internal int topic_cnt; @@ -90,7 +94,7 @@ struct rd_kafka_group_list internal IntPtr groups; internal int group_cnt; }; - + enum rd_kafka_vtype { End, // va-arg sentinel diff --git a/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods.cs b/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods.cs index 0fac01c87..80e61b14d 100644 --- a/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods.cs +++ b/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods.cs @@ -558,11 +558,11 @@ internal static extern IntPtr rd_kafka_AdminOptions_set_isolation_level( IntPtr options, IntPtr isolation_level); - [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AdminOptions_set_match_consumer_group_states( - IntPtr options, - ConsumerGroupState[] states, - UIntPtr statesCnt); + IntPtr options, + ConsumerGroupState[] states, + UIntPtr statesCnt); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_NewTopic_new( @@ -722,7 +722,7 @@ internal static extern IntPtr rd_kafka_ConfigEntry_name( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern IntPtr rd_kafka_ConfigEntry_value ( + internal static extern IntPtr rd_kafka_ConfigEntry_value( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] @@ -742,7 +742,7 @@ internal static extern IntPtr rd_kafka_ConfigEntry_is_sensitive( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern IntPtr rd_kafka_ConfigEntry_is_synonym ( + internal static extern IntPtr rd_kafka_ConfigEntry_is_synonym( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] @@ -811,7 +811,7 @@ internal static extern IntPtr rd_kafka_ConfigResource_error_string( [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern void rd_kafka_AlterConfigs ( + internal static extern void rd_kafka_AlterConfigs( /* rd_kafka_t * */ IntPtr rk, /* rd_kafka_ConfigResource_t ** */ IntPtr[] configs, UIntPtr config_cnt, @@ -822,7 +822,7 @@ internal static extern void rd_kafka_AlterConfigs ( internal static extern /* rd_kafka_ConfigResource_t ** */ IntPtr rd_kafka_AlterConfigs_result_resources( /* rd_kafka_AlterConfigs_result_t * */ IntPtr result, out UIntPtr cntp); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern void rd_kafka_IncrementalAlterConfigs( /* rd_kafka_t * */ IntPtr rk, @@ -830,14 +830,14 @@ internal static extern void rd_kafka_IncrementalAlterConfigs( UIntPtr config_cnt, /* rd_kafka_AdminOptions_t * */ IntPtr options, /* rd_kafka_queue_t * */ IntPtr rkqu); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern /* rd_kafka_ConfigResource_t ** */ IntPtr rd_kafka_IncrementalAlterConfigs_result_resources( /* rd_kafka_IncrementalAlterConfigs_result_t * */ IntPtr result, out UIntPtr cntp); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static 
extern void rd_kafka_DescribeConfigs ( + internal static extern void rd_kafka_DescribeConfigs( /* rd_kafka_t * */ IntPtr rk, /* rd_kafka_ConfigResource_t ***/ IntPtr[] configs, UIntPtr config_cnt, @@ -1132,7 +1132,7 @@ internal static extern void rd_kafka_DescribeUserScramCredentials( UIntPtr usersCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern ErrorCode rd_kafka_AlterUserScramCredentials( IntPtr handle, @@ -1140,7 +1140,7 @@ internal static extern ErrorCode rd_kafka_AlterUserScramCredentials( UIntPtr alterationsCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_UserScramCredentialDeletion_new( string user, @@ -1159,7 +1159,7 @@ internal static extern IntPtr rd_kafka_UserScramCredentialUpsertion_new( [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern void rd_kafka_UserScramCredentialAlteration_destroy( IntPtr alteration); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_DescribeUserScramCredentials_result_descriptions( IntPtr event_result, @@ -1174,24 +1174,24 @@ internal static extern IntPtr rd_kafka_DescribeUserScramCredentials_result_descr [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern int rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count(IntPtr description); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_UserScramCredentialsDescription_scramcredentialinfo(IntPtr description, int i); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern ScramMechanism rd_kafka_ScramCredentialInfo_mechanism(IntPtr scramcredentialinfo); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern int rd_kafka_ScramCredentialInfo_iterations(IntPtr scramcredentialinfo); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AlterUserScramCredentials_result_responses( IntPtr event_result, out UIntPtr cntp); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AlterUserScramCredentials_result_response_user(IntPtr element); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AlterUserScramCredentials_result_response_error(IntPtr element); @@ -1213,14 +1213,14 @@ internal static extern void rd_kafka_DescribeTopics( IntPtr topicCollection, IntPtr optionsPtr, IntPtr resultQueuePtr); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_DescribeTopics_result_topics(IntPtr result, out UIntPtr cntp); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_TopicCollection_of_topic_names([MarshalAs(UnmanagedType.LPArray)] string[] topics, UIntPtr topicsCnt); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern void rd_kafka_TopicCollection_destroy(IntPtr topic_collection); @@ -1247,7 +1247,7 @@ internal static extern IntPtr rd_kafka_TopicCollection_of_topic_names([MarshalAs [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_TopicPartitionInfo_leader(IntPtr topic_partition_info); - 
+ [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern int rd_kafka_TopicPartitionInfo_partition(IntPtr topic_partition_info); diff --git a/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods_Alpine.cs b/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods_Alpine.cs index ee40ec23b..00c159c58 100644 --- a/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods_Alpine.cs +++ b/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods_Alpine.cs @@ -562,11 +562,11 @@ internal static extern IntPtr rd_kafka_AdminOptions_set_isolation_level( IntPtr options, IntPtr isolation_level); - [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AdminOptions_set_match_consumer_group_states( - IntPtr options, - ConsumerGroupState[] states, - UIntPtr statesCnt); + IntPtr options, + ConsumerGroupState[] states, + UIntPtr statesCnt); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_NewTopic_new( @@ -726,7 +726,7 @@ internal static extern IntPtr rd_kafka_ConfigEntry_name( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern IntPtr rd_kafka_ConfigEntry_value ( + internal static extern IntPtr rd_kafka_ConfigEntry_value( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] @@ -746,7 +746,7 @@ internal static extern IntPtr rd_kafka_ConfigEntry_is_sensitive( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern IntPtr rd_kafka_ConfigEntry_is_synonym ( + internal static extern IntPtr rd_kafka_ConfigEntry_is_synonym( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] @@ -815,7 +815,7 @@ internal static extern IntPtr rd_kafka_ConfigResource_error_string( [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern void rd_kafka_AlterConfigs ( + internal static extern void rd_kafka_AlterConfigs( /* rd_kafka_t * */ IntPtr rk, /* rd_kafka_ConfigResource_t ** */ IntPtr[] configs, UIntPtr config_cnt, @@ -826,7 +826,7 @@ internal static extern void rd_kafka_AlterConfigs ( internal static extern /* rd_kafka_ConfigResource_t ** */ IntPtr rd_kafka_AlterConfigs_result_resources( /* rd_kafka_AlterConfigs_result_t * */ IntPtr result, out UIntPtr cntp); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern void rd_kafka_IncrementalAlterConfigs( /* rd_kafka_t * */ IntPtr rk, @@ -834,14 +834,14 @@ internal static extern void rd_kafka_IncrementalAlterConfigs( UIntPtr config_cnt, /* rd_kafka_AdminOptions_t * */ IntPtr options, /* rd_kafka_queue_t * */ IntPtr rkqu); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern /* rd_kafka_ConfigResource_t ** */ IntPtr rd_kafka_IncrementalAlterConfigs_result_resources( /* rd_kafka_IncrementalAlterConfigs_result_t * */ IntPtr result, out UIntPtr cntp); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern void rd_kafka_DescribeConfigs ( + internal static extern void rd_kafka_DescribeConfigs( /* rd_kafka_t * */ IntPtr rk, /* rd_kafka_ConfigResource_t ***/ IntPtr[] configs, UIntPtr config_cnt, @@ -1136,7 +1136,7 @@ internal static extern void 
rd_kafka_DescribeUserScramCredentials( UIntPtr usersCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern ErrorCode rd_kafka_AlterUserScramCredentials( IntPtr handle, @@ -1144,7 +1144,7 @@ internal static extern ErrorCode rd_kafka_AlterUserScramCredentials( UIntPtr alterationsCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_UserScramCredentialDeletion_new( string user, @@ -1163,7 +1163,7 @@ internal static extern IntPtr rd_kafka_UserScramCredentialUpsertion_new( [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern void rd_kafka_UserScramCredentialAlteration_destroy( IntPtr alteration); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_DescribeUserScramCredentials_result_descriptions( IntPtr event_result, @@ -1178,24 +1178,24 @@ internal static extern IntPtr rd_kafka_DescribeUserScramCredentials_result_descr [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern int rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count(IntPtr description); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_UserScramCredentialsDescription_scramcredentialinfo(IntPtr description, int i); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern ScramMechanism rd_kafka_ScramCredentialInfo_mechanism(IntPtr scramcredentialinfo); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern int rd_kafka_ScramCredentialInfo_iterations(IntPtr scramcredentialinfo); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AlterUserScramCredentials_result_responses( IntPtr event_result, out UIntPtr cntp); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AlterUserScramCredentials_result_response_user(IntPtr element); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AlterUserScramCredentials_result_response_error(IntPtr element); @@ -1217,14 +1217,14 @@ internal static extern void rd_kafka_DescribeTopics( IntPtr topicCollection, IntPtr optionsPtr, IntPtr resultQueuePtr); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_DescribeTopics_result_topics(IntPtr result, out UIntPtr cntp); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_TopicCollection_of_topic_names([MarshalAs(UnmanagedType.LPArray)] string[] topics, UIntPtr topicsCnt); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern void rd_kafka_TopicCollection_destroy(IntPtr topic_collection); @@ -1251,7 +1251,7 @@ internal static extern IntPtr rd_kafka_TopicCollection_of_topic_names([MarshalAs [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_TopicPartitionInfo_leader(IntPtr topic_partition_info); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern int rd_kafka_TopicPartitionInfo_partition(IntPtr topic_partition_info); diff --git a/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods_Centos8.cs 
b/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods_Centos8.cs index 5032651f5..3561559f5 100644 --- a/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods_Centos8.cs +++ b/src/Confluent.Kafka/Impl/NativeMethods/NativeMethods_Centos8.cs @@ -562,11 +562,11 @@ internal static extern IntPtr rd_kafka_AdminOptions_set_isolation_level( IntPtr options, IntPtr isolation_level); - [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AdminOptions_set_match_consumer_group_states( - IntPtr options, - ConsumerGroupState[] states, - UIntPtr statesCnt); + IntPtr options, + ConsumerGroupState[] states, + UIntPtr statesCnt); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_NewTopic_new( @@ -726,7 +726,7 @@ internal static extern IntPtr rd_kafka_ConfigEntry_name( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern IntPtr rd_kafka_ConfigEntry_value ( + internal static extern IntPtr rd_kafka_ConfigEntry_value( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] @@ -746,7 +746,7 @@ internal static extern IntPtr rd_kafka_ConfigEntry_is_sensitive( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern IntPtr rd_kafka_ConfigEntry_is_synonym ( + internal static extern IntPtr rd_kafka_ConfigEntry_is_synonym( /* rd_kafka_ConfigEntry_t * */ IntPtr entry); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] @@ -815,7 +815,7 @@ internal static extern IntPtr rd_kafka_ConfigResource_error_string( [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern void rd_kafka_AlterConfigs ( + internal static extern void rd_kafka_AlterConfigs( /* rd_kafka_t * */ IntPtr rk, /* rd_kafka_ConfigResource_t ** */ IntPtr[] configs, UIntPtr config_cnt, @@ -826,7 +826,7 @@ internal static extern void rd_kafka_AlterConfigs ( internal static extern /* rd_kafka_ConfigResource_t ** */ IntPtr rd_kafka_AlterConfigs_result_resources( /* rd_kafka_AlterConfigs_result_t * */ IntPtr result, out UIntPtr cntp); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern void rd_kafka_IncrementalAlterConfigs( /* rd_kafka_t * */ IntPtr rk, @@ -834,14 +834,14 @@ internal static extern void rd_kafka_IncrementalAlterConfigs( UIntPtr config_cnt, /* rd_kafka_AdminOptions_t * */ IntPtr options, /* rd_kafka_queue_t * */ IntPtr rkqu); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern /* rd_kafka_ConfigResource_t ** */ IntPtr rd_kafka_IncrementalAlterConfigs_result_resources( /* rd_kafka_IncrementalAlterConfigs_result_t * */ IntPtr result, out UIntPtr cntp); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern void rd_kafka_DescribeConfigs ( + internal static extern void rd_kafka_DescribeConfigs( /* rd_kafka_t * */ IntPtr rk, /* rd_kafka_ConfigResource_t ***/ IntPtr[] configs, UIntPtr config_cnt, @@ -1136,7 +1136,7 @@ internal static extern void rd_kafka_DescribeUserScramCredentials( UIntPtr usersCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern ErrorCode rd_kafka_AlterUserScramCredentials( IntPtr handle, @@ -1144,7 
+1144,7 @@ internal static extern ErrorCode rd_kafka_AlterUserScramCredentials( UIntPtr alterationsCnt, IntPtr optionsPtr, IntPtr resultQueuePtr); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_UserScramCredentialDeletion_new( string user, @@ -1163,7 +1163,7 @@ internal static extern IntPtr rd_kafka_UserScramCredentialUpsertion_new( [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern void rd_kafka_UserScramCredentialAlteration_destroy( IntPtr alteration); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_DescribeUserScramCredentials_result_descriptions( IntPtr event_result, @@ -1178,24 +1178,24 @@ internal static extern IntPtr rd_kafka_DescribeUserScramCredentials_result_descr [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern int rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count(IntPtr description); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_UserScramCredentialsDescription_scramcredentialinfo(IntPtr description, int i); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern ScramMechanism rd_kafka_ScramCredentialInfo_mechanism(IntPtr scramcredentialinfo); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern int rd_kafka_ScramCredentialInfo_iterations(IntPtr scramcredentialinfo); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AlterUserScramCredentials_result_responses( IntPtr event_result, out UIntPtr cntp); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AlterUserScramCredentials_result_response_user(IntPtr element); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_AlterUserScramCredentials_result_response_error(IntPtr element); @@ -1217,14 +1217,14 @@ internal static extern void rd_kafka_DescribeTopics( IntPtr topicCollection, IntPtr optionsPtr, IntPtr resultQueuePtr); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_DescribeTopics_result_topics(IntPtr result, out UIntPtr cntp); [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_TopicCollection_of_topic_names([MarshalAs(UnmanagedType.LPArray)] string[] topics, UIntPtr topicsCnt); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern void rd_kafka_TopicCollection_destroy(IntPtr topic_collection); @@ -1251,7 +1251,7 @@ internal static extern IntPtr rd_kafka_TopicCollection_of_topic_names([MarshalAs [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rd_kafka_TopicPartitionInfo_leader(IntPtr topic_partition_info); - + [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] internal static extern int rd_kafka_TopicPartitionInfo_partition(IntPtr topic_partition_info); diff --git a/src/Confluent.Kafka/Impl/PlatformApis.cs b/src/Confluent.Kafka/Impl/PlatformApis.cs index 40e736ea3..57e916e6e 100644 --- a/src/Confluent.Kafka/Impl/PlatformApis.cs +++ b/src/Confluent.Kafka/Impl/PlatformApis.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; namespace Confluent.Kafka.Impl diff --git 
a/src/Confluent.Kafka/Impl/SafeConfigHandle.cs b/src/Confluent.Kafka/Impl/SafeConfigHandle.cs index 01dea4d42..3e2482a9b 100644 --- a/src/Confluent.Kafka/Impl/SafeConfigHandle.cs +++ b/src/Confluent.Kafka/Impl/SafeConfigHandle.cs @@ -69,7 +69,7 @@ internal IntPtr Dup() internal Dictionary Dump() { - UIntPtr cntp = (UIntPtr) 0; + UIntPtr cntp = (UIntPtr)0; IntPtr data = Librdkafka.conf_dump(handle, out cntp); if (data == IntPtr.Zero) @@ -79,14 +79,14 @@ internal Dictionary Dump() try { - if (((int) cntp & 1) != 0) + if (((int)cntp & 1) != 0) { // Expect Key -> Value, so even number of strings throw new Exception("Invalid number of config entries"); } var dict = new Dictionary(); - for (int i = 0; i < (int) cntp / 2; ++i) + for (int i = 0; i < (int)cntp / 2; ++i) { dict.Add(Util.Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, 2 * i * Util.Marshal.SizeOf())), Util.Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, (2 * i + 1) * Util.Marshal.SizeOf()))); @@ -104,7 +104,7 @@ internal void Set(string name, string value) { var errorStringBuilder = new StringBuilder(Librdkafka.MaxErrorStringLength); ConfRes res = Librdkafka.conf_set(handle, name, value, - errorStringBuilder, (UIntPtr) errorStringBuilder.Capacity); + errorStringBuilder, (UIntPtr)errorStringBuilder.Capacity); if (res == ConfRes.Ok) { return; @@ -125,13 +125,13 @@ internal void Set(string name, string value) internal string Get(string name) { - UIntPtr destSize = (UIntPtr) 0; + UIntPtr destSize = (UIntPtr)0; StringBuilder sb = null; ConfRes res = Librdkafka.conf_get(handle, name, null, ref destSize); if (res == ConfRes.Ok) { - sb = new StringBuilder((int) destSize); + sb = new StringBuilder((int)destSize); res = Librdkafka.conf_get(handle, name, sb, ref destSize); } if (res != ConfRes.Ok) diff --git a/src/Confluent.Kafka/Impl/SafeKafkaHandle.cs b/src/Confluent.Kafka/Impl/SafeKafkaHandle.cs index bd553b304..21716bf23 100644 --- a/src/Confluent.Kafka/Impl/SafeKafkaHandle.cs +++ b/src/Confluent.Kafka/Impl/SafeKafkaHandle.cs @@ -43,10 +43,10 @@ struct rd_kafka_message internal int partition; /* Partition */ internal /* void * */ IntPtr val; /* err==0: Message val * err!=0: Error string */ - internal UIntPtr len; /* err==0: Message val length + internal UIntPtr len; /* err==0: Message val length * err!=0: Error string length */ internal /* void * */ IntPtr key; /* err==0: Optional message key */ - internal UIntPtr key_len; /* err==0: Optional message key length */ + internal UIntPtr key_len; /* err==0: Optional message key length */ internal long offset; /* Consume: * Message offset (or offset for error * if err!=0 if applicable). 
@@ -112,7 +112,7 @@ internal SafeTopicHandle newTopic(string topic, IntPtr topicConfigPtr) } } - public SafeKafkaHandle() : base("kafka") {} + public SafeKafkaHandle() : base("kafka") { } /// /// This object is tightly coupled to the referencing Producer / @@ -134,7 +134,7 @@ public SafeKafkaHandle() : base("kafka") {} public static SafeKafkaHandle Create(RdKafkaType type, IntPtr config, IClient owner) { var errorStringBuilder = new StringBuilder(Librdkafka.MaxErrorStringLength); - var kh = Librdkafka.kafka_new(type, config, errorStringBuilder,(UIntPtr) errorStringBuilder.Capacity); + var kh = Librdkafka.kafka_new(type, config, errorStringBuilder, (UIntPtr)errorStringBuilder.Capacity); if (kh.IsInvalid) { Librdkafka.conf_destroy(config); @@ -469,7 +469,8 @@ internal Metadata GetMetadata(bool allTopics, SafeTopicHandle topic, int millise if (err == ErrorCode.NoError) { - try { + try + { var meta = Util.Marshal.PtrToStructure(metaPtr); var brokers = Enumerable.Range(0, meta.broker_cnt) @@ -631,7 +632,7 @@ internal unsafe byte[] SerializeConsumerGroupMetadata(IntPtr consumerGroupMetada } var result = new byte[(int)dataSize]; byte* pIter = (byte*)buffer; - for (int i=0; i<(int)dataSize; ++i) + for (int i = 0; i < (int)dataSize; ++i) { result[i] = *pIter++; } @@ -659,7 +660,7 @@ internal WatermarkOffsets QueryWatermarkOffsets(string topic, int partition, int throw new KafkaException(CreatePossiblyFatalError(err, null)); } - return new WatermarkOffsets(low, high); + return new WatermarkOffsets(low, high); } internal WatermarkOffsets GetWatermarkOffsets(string topic, int partition) @@ -690,7 +691,7 @@ internal List OffsetsForTimes(IEnumerable topics) throw new ArgumentNullException("Subscribed-to topic must not be null"); } - IntPtr list = Librdkafka.topic_partition_list_new((IntPtr) topics.Count()); + IntPtr list = Librdkafka.topic_partition_list_new((IntPtr)topics.Count()); if (list == IntPtr.Zero) { throw new Exception("Failed to create topic partition list"); @@ -813,7 +814,7 @@ private void AssignImpl(IEnumerable partitions, IntPtr list = IntPtr.Zero; if (partitions != null) { - list = Librdkafka.topic_partition_list_new((IntPtr) partitions.Count()); + list = Librdkafka.topic_partition_list_new((IntPtr)partitions.Count()); if (list == IntPtr.Zero) { throw new Exception("Failed to create topic partition list"); @@ -829,7 +830,7 @@ private void AssignImpl(IEnumerable partitions, IntPtr ptr = Librdkafka.topic_partition_list_add(list, partition.Topic, partition.Partition); Marshal.WriteInt64( ptr, - (int) Util.Marshal.OffsetOf("offset"), + (int)Util.Marshal.OffsetOf("offset"), partition.Offset); } } @@ -883,7 +884,8 @@ internal string RebalanceProtocol { ThrowIfHandleClosed(); var rebalanceProtocolPtr = Librdkafka.rebalance_protocol(handle); - if (rebalanceProtocolPtr == IntPtr.Zero) { + if (rebalanceProtocolPtr == IntPtr.Zero) + { return null; } return Util.Marshal.PtrToStringUTF8(rebalanceProtocolPtr); @@ -979,9 +981,9 @@ internal void Seek(string topic, Partition partition, Offset offset, int millise int? 
leaderEpoch = null) { ThrowIfHandleClosed(); - + ErrorCode result; - IntPtr list = Librdkafka.topic_partition_list_new((IntPtr) 1); + IntPtr list = Librdkafka.topic_partition_list_new((IntPtr)1); if (list == IntPtr.Zero) { throw new Exception("Failed to create seek partition list"); @@ -992,9 +994,9 @@ internal void Seek(string topic, Partition partition, Offset offset, int millise Marshal.WriteInt64( listPartition, - (int) Util.Marshal.OffsetOf("offset"), + (int)Util.Marshal.OffsetOf("offset"), offset); - + if (leaderEpoch != null) { Librdkafka.topic_partition_set_leader_epoch(listPartition, @@ -1004,7 +1006,7 @@ internal void Seek(string topic, Partition partition, Offset offset, int millise IntPtr resultError = Librdkafka.seek_partitions( handle, list, (IntPtr)millisecondsTimeout); - + if (resultError != IntPtr.Zero) { result = Librdkafka.error_code(resultError); @@ -1013,7 +1015,7 @@ internal void Seek(string topic, Partition partition, Offset offset, int millise { result = ErrorCode.NoError; } - + if (result == ErrorCode.NoError) { var topicPartitionErrors = GetTopicPartitionErrorList(list); @@ -1025,7 +1027,7 @@ internal void Seek(string topic, Partition partition, Offset offset, int millise } } } - + Librdkafka.topic_partition_list_destroy(list); if (result != ErrorCode.NoError) @@ -1038,7 +1040,7 @@ internal List Pause(IEnumerable partitions) { ThrowIfHandleClosed(); - IntPtr list = Librdkafka.topic_partition_list_new((IntPtr) partitions.Count()); + IntPtr list = Librdkafka.topic_partition_list_new((IntPtr)partitions.Count()); if (list == IntPtr.Zero) { throw new Exception("Failed to create pause partition list"); @@ -1070,7 +1072,7 @@ internal List Resume(IEnumerable partitions { ThrowIfHandleClosed(); - IntPtr list = Librdkafka.topic_partition_list_new((IntPtr) partitions.Count()); + IntPtr list = Librdkafka.topic_partition_list_new((IntPtr)partitions.Count()); if (list == IntPtr.Zero) { throw new Exception("Failed to create resume partition list"); @@ -1103,7 +1105,7 @@ internal List Committed(IEnumerable partit { ThrowIfHandleClosed(); - IntPtr list = Librdkafka.topic_partition_list_new((IntPtr) partitions.Count()); + IntPtr list = Librdkafka.topic_partition_list_new((IntPtr)partitions.Count()); if (list == IntPtr.Zero) { throw new Exception("Failed to create committed partition list"); @@ -1135,7 +1137,7 @@ internal List Position(IEnumerable partiti { ThrowIfHandleClosed(); - IntPtr list = Librdkafka.topic_partition_list_new((IntPtr) partitions.Count()); + IntPtr list = Librdkafka.topic_partition_list_new((IntPtr)partitions.Count()); if (list == IntPtr.Zero) { throw new Exception("Failed to create position list"); @@ -1268,7 +1270,7 @@ internal static IntPtr GetCTopicPartitionList(IEnumerable } IntPtr ptr = Librdkafka.topic_partition_list_add(list, p.Topic, p.Partition); Marshal.WriteInt64(ptr, (int)Util.Marshal.OffsetOf("offset"), p.Offset); - + if (p.LeaderEpoch != null) { Librdkafka.topic_partition_set_leader_epoch(ptr, @@ -1317,8 +1319,8 @@ static byte[] CopyBytes(IntPtr ptr, IntPtr len) byte[] data = null; if (ptr != IntPtr.Zero) { - data = new byte[(int) len]; - Marshal.Copy(ptr, data, 0, (int) len); + data = new byte[(int)len]; + Marshal.Copy(ptr, data, 0, (int)len); } return data; } @@ -1531,7 +1533,7 @@ internal void AlterConfigs( Librdkafka.AlterConfigs(handle, configPtrs, (UIntPtr)configPtrs.Length, optionsPtr, resultQueuePtr); - for (int i=0; i deleteGroups, DeleteGroupsOptions optio } finally { - foreach(var deleteGroupPtr in deleteGroupsPtrs) + foreach (var 
deleteGroupPtr in deleteGroupsPtrs) { - if(deleteGroupPtr != IntPtr.Zero) + if (deleteGroupPtr != IntPtr.Zero) { Librdkafka.DeleteGroup_destroy(deleteGroupPtr); } @@ -2182,10 +2184,10 @@ internal void AlterConsumerGroupOffsets( { ThrowIfHandleClosed(); - // For now, we only support one group at a time given as a single element of groupsPartitions. - // Code has been written so that only this if-guard needs to be removed when we add support for - // multiple ConsumerGroupTopicPartitionOffsets. - if (groupsPartitions.Count() != 1) + // For now, we only support one group at a time given as a single element of groupsPartitions. + // Code has been written so that only this if-guard needs to be removed when we add support for + // multiple ConsumerGroupTopicPartitionOffsets. + if (groupsPartitions.Count() != 1) { throw new ArgumentException("Can only alter offsets for one group at a time"); } @@ -2254,10 +2256,10 @@ internal void ListConsumerGroupOffsets( { ThrowIfHandleClosed(); - // For now, we only support one group at a time given as a single element of groupsPartitions. - // Code has been written so that only this if-guard needs to be removed when we add support for - // multiple ConsumerGroupTopicPartitions. - if (groupsPartitions.Count() != 1) + // For now, we only support one group at a time given as a single element of groupsPartitions. + // Code has been written so that only this if-guard needs to be removed when we add support for + // multiple ConsumerGroupTopicPartitions. + if (groupsPartitions.Count() != 1) { throw new ArgumentException("Can only list offsets for one group at a time"); } @@ -2357,7 +2359,8 @@ internal void DescribeConsumerGroups(IEnumerable groups, DescribeConsume { ThrowIfHandleClosed(); - if (groups.Count() == 0) { + if (groups.Count() == 0) + { throw new ArgumentException("at least one group should be provided to DescribeConsumerGroups"); } @@ -2396,7 +2399,7 @@ internal void DescribeUserScramCredentials(IEnumerable users, DescribeUs throw new ArgumentException("Cannot have a null or empty user"); } } - + var optionsPtr = IntPtr.Zero; try { @@ -2408,7 +2411,7 @@ internal void DescribeUserScramCredentials(IEnumerable users, DescribeUs // Call DescribeUserScramCredentials (async). 
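                // Editorial note, not part of this patch: the user names are handed to the
                // native layer as a managed string[] plus an explicit count, and the result
                // is delivered as an event on resultQueuePtr, where the admin client's poll
                // loop matches it back to the caller's completion source. At the public API
                // level this surfaces roughly as the following hedged sketch (the user name
                // is a placeholder, method/option names per the Confluent.Kafka admin API):
                //
                //     var result = await adminClient.DescribeUserScramCredentialsAsync(
                //         new[] { "some-user" },
                //         new DescribeUserScramCredentialsOptions { RequestTimeout = TimeSpan.FromSeconds(30) });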
Librdkafka.DescribeUserScramCredentials( - handle, users.ToArray(), (UIntPtr) users.Count(), + handle, users.ToArray(), (UIntPtr)users.Count(), optionsPtr, resultQueuePtr); } finally @@ -2457,21 +2460,21 @@ internal void AlterUserScramCredentials(IEnumerable topicPartitionOf setOption_completionSource(optionsPtr, completionSourcePtr); topic_partition_list = Librdkafka.topic_partition_list_new((IntPtr)topicPartitionOffsets.Count()); - foreach(var topicPartitionOffset in topicPartitionOffsets) + foreach (var topicPartitionOffset in topicPartitionOffsets) { string topic = topicPartitionOffset.TopicPartition.Topic; Partition partition = topicPartitionOffset.TopicPartition.Partition; IntPtr topic_partition = Librdkafka.topic_partition_list_add(topic_partition_list, topic, partition); Marshal.WriteInt64( topic_partition, - (int) Util.Marshal.OffsetOf("offset"), + (int)Util.Marshal.OffsetOf("offset"), topicPartitionOffset.OffsetSpec.Value()); } Librdkafka.ListOffsets(handle, topic_partition_list, optionsPtr, resultQueuePtr); @@ -2539,7 +2542,7 @@ internal void DescribeTopics(TopicCollection topicCollection, DescribeTopicsOpti topicCollectionPtr = Librdkafka.TopicCollection_of_topic_names( topicCollection.Topics.ToArray(), (UIntPtr)topicCollection.Topics.Count()); - + // Set Admin Options if any. options = options ?? new DescribeTopicsOptions(); optionsPtr = Librdkafka.AdminOptions_new(handle, Librdkafka.AdminOp.DescribeTopics); @@ -2599,8 +2602,8 @@ internal void OAuthBearerSetToken(string tokenValue, long lifetimeMs, string pri var errorStringBuilder = new StringBuilder(Librdkafka.MaxErrorStringLength); var errorCode = Librdkafka.oauthbearer_set_token(handle, tokenValue, lifetimeMs, principalName, - extensionsArray, (UIntPtr) (extensionsArray?.Length ?? 0), - errorStringBuilder, (UIntPtr) errorStringBuilder.Capacity); + extensionsArray, (UIntPtr)(extensionsArray?.Length ?? 
0), + errorStringBuilder, (UIntPtr)errorStringBuilder.Capacity); if (errorCode != ErrorCode.NoError) { diff --git a/src/Confluent.Kafka/Impl/SafeTopicConfigHandle.cs b/src/Confluent.Kafka/Impl/SafeTopicConfigHandle.cs index 5e6c3deb6..caf1a53da 100644 --- a/src/Confluent.Kafka/Impl/SafeTopicConfigHandle.cs +++ b/src/Confluent.Kafka/Impl/SafeTopicConfigHandle.cs @@ -46,7 +46,7 @@ internal SafeTopicConfigHandle Duplicate() // TODO: deduplicate, merge with other one internal Dictionary Dump() { - UIntPtr cntp = (UIntPtr) 0; + UIntPtr cntp = (UIntPtr)0; IntPtr data = Librdkafka.topic_conf_dump(handle, out cntp); if (data == IntPtr.Zero) @@ -56,14 +56,14 @@ internal Dictionary Dump() try { - if (((int) cntp & 1) != 0) + if (((int)cntp & 1) != 0) { // Expect Key -> Value, so even number of strings throw new Exception("Invalid number of config entries"); } var dict = new Dictionary(); - for (int i = 0; i < (int) cntp / 2; ++i) + for (int i = 0; i < (int)cntp / 2; ++i) { dict.Add(Util.Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, 2 * i * Util.Marshal.SizeOf())), Util.Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, (2 * i + 1) * Util.Marshal.SizeOf()))); @@ -81,7 +81,7 @@ internal void Set(string name, string value) { var errorStringBuilder = new StringBuilder(Librdkafka.MaxErrorStringLength); ConfRes res = Librdkafka.topic_conf_set(handle, name, value, - errorStringBuilder, (UIntPtr) errorStringBuilder.Capacity); + errorStringBuilder, (UIntPtr)errorStringBuilder.Capacity); if (res == ConfRes.Ok) { return; @@ -108,7 +108,7 @@ internal string Get(string name) ConfRes res = Librdkafka.topic_conf_get(handle, name, null, ref destSize); if (res == ConfRes.Ok) { - sb = new StringBuilder((int) destSize); + sb = new StringBuilder((int)destSize); res = Librdkafka.topic_conf_get(handle, name, sb, ref destSize); } if (res != ConfRes.Ok) diff --git a/src/Confluent.Kafka/Internal/Extensions/Dictionary.cs b/src/Confluent.Kafka/Internal/Extensions/Dictionary.cs index 837e35fe2..dc4b21c4e 100644 --- a/src/Confluent.Kafka/Internal/Extensions/Dictionary.cs +++ b/src/Confluent.Kafka/Internal/Extensions/Dictionary.cs @@ -23,4 +23,4 @@ internal static string[] ToStringArray(this IDictionary dictiona return result; } } -} \ No newline at end of file +} diff --git a/src/Confluent.Kafka/Internal/Extensions/String.cs b/src/Confluent.Kafka/Internal/Extensions/String.cs index 08c6bffa4..cc75b68a5 100644 --- a/src/Confluent.Kafka/Internal/Extensions/String.cs +++ b/src/Confluent.Kafka/Internal/Extensions/String.cs @@ -27,7 +27,7 @@ namespace Confluent.Kafka /// internal static class StringExtensions { - internal static Encoding ToEncoding(this string encodingName) + internal static Encoding ToEncoding(this string encodingName) { switch (encodingName.ToLower()) { @@ -49,6 +49,6 @@ internal static string Quote(this bool b) => b ? "true" : "false"; internal static string Quote(this string str) => - str == null ? "null" : $"\"{str.Replace("\"","\\\"")}\""; + str == null ? "null" : $"\"{str.Replace("\"", "\\\"")}\""; } } diff --git a/src/Confluent.Kafka/Internal/Util.cs b/src/Confluent.Kafka/Internal/Util.cs index 9647bbdaa..3161ae2ea 100644 --- a/src/Confluent.Kafka/Internal/Util.cs +++ b/src/Confluent.Kafka/Internal/Util.cs @@ -60,7 +60,7 @@ public unsafe static string PtrToStringUTF8(IntPtr strPtr) { return null; } - + // TODO: Is there a built in / vectorized / better way to implement this? 
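            // Editorial aside, not part of this patch: on frameworks that expose it
            // (e.g. .NET Core 3.0+ / .NET 5+), Marshal.PtrToStringUTF8(strPtr) performs
            // this scan-and-decode in a single call, and where the unsafe
            // Encoding.UTF8.GetString((byte*)strPtr, length) overload is available it
            // avoids the intermediate byte[] copy once the length is known. Whether the
            // project's target frameworks permit either is an open question, hence the
            // manual loop below.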
byte* pTraverse = (byte*)strPtr; while (*pTraverse != 0) { pTraverse += 1; } diff --git a/src/Confluent.Kafka/KafkaException.cs b/src/Confluent.Kafka/KafkaException.cs index 4e89edf77..50c400f39 100644 --- a/src/Confluent.Kafka/KafkaException.cs +++ b/src/Confluent.Kafka/KafkaException.cs @@ -52,7 +52,7 @@ public KafkaException(Error error) public KafkaException(Error error, Exception innerException) : base(error.Reason, innerException) { - Error = error; + Error = error; } /// diff --git a/src/Confluent.Kafka/Library.cs b/src/Confluent.Kafka/Library.cs index 219495818..358e46a28 100644 --- a/src/Confluent.Kafka/Library.cs +++ b/src/Confluent.Kafka/Library.cs @@ -22,7 +22,7 @@ using Confluent.Kafka.Internal; using Confluent.Kafka.Impl; -[assembly:CLSCompliant(true)] +[assembly: CLSCompliant(true)] namespace Confluent.Kafka @@ -49,7 +49,7 @@ public static int Version get { Librdkafka.Initialize(null); - return (int) Librdkafka.version(); + return (int)Librdkafka.version(); } } diff --git a/src/Confluent.Kafka/LogMessage.cs b/src/Confluent.Kafka/LogMessage.cs index 94a056b31..77df4a666 100644 --- a/src/Confluent.Kafka/LogMessage.cs +++ b/src/Confluent.Kafka/LogMessage.cs @@ -59,18 +59,18 @@ public LogMessage(string name, SyslogLevel level, string facility, string messag /// Gets the log level (levels correspond to syslog(3)), lower is worse. /// public SyslogLevel Level { get; } - + /// /// Gets the facility (section of librdkafka code) that produced the message. /// public string Facility { get; } - + /// /// Gets the log message. /// public string Message { get; } - + // SysLog levels: // [0] emergency, [1] alert, [2] critical, [3] error, [4] warning, [5] notice, [6] info, [7] debug. diff --git a/src/Confluent.Kafka/Null.cs b/src/Confluent.Kafka/Null.cs index 3f1b797d0..2b73015a5 100644 --- a/src/Confluent.Kafka/Null.cs +++ b/src/Confluent.Kafka/Null.cs @@ -24,6 +24,6 @@ namespace Confluent.Kafka /// public sealed class Null { - private Null() {} + private Null() { } } } diff --git a/src/Confluent.Kafka/PartitionMetadata.cs b/src/Confluent.Kafka/PartitionMetadata.cs index 8bf4ec707..825a98aa0 100644 --- a/src/Confluent.Kafka/PartitionMetadata.cs +++ b/src/Confluent.Kafka/PartitionMetadata.cs @@ -99,4 +99,4 @@ public override string ToString() } } -} \ No newline at end of file +} diff --git a/src/Confluent.Kafka/Producer.cs b/src/Confluent.Kafka/Producer.cs index 79867514b..e46ca1f04 100644 --- a/src/Confluent.Kafka/Producer.cs +++ b/src/Confluent.Kafka/Producer.cs @@ -74,7 +74,7 @@ internal class Config private Handle borrowedHandle; private SafeKafkaHandle KafkaHandle - => ownedKafkaHandle != null + => ownedKafkaHandle != null ? 
ownedKafkaHandle : borrowedHandle.LibrdkafkaHandle; @@ -112,7 +112,7 @@ private Task StartPollTask(CancellationToken ct) } } } - catch (OperationCanceledException) {} + catch (OperationCanceledException) { } }, ct, TaskCreationOptions.LongRunning, TaskScheduler.Default); @@ -216,17 +216,17 @@ private void DeliveryReportCallbackImpl(IntPtr rk, IntPtr rkmessage, IntPtr opaq } var gch = GCHandle.FromIntPtr(msg._private); - var deliveryHandler = (IDeliveryHandler) gch.Target; + var deliveryHandler = (IDeliveryHandler)gch.Target; gch.Free(); Headers headers = null; - if (this.enableDeliveryReportHeaders) + if (this.enableDeliveryReportHeaders) { headers = new Headers(); Librdkafka.message_headers(rkmessage, out IntPtr hdrsPtr); if (hdrsPtr != IntPtr.Zero) { - for (var i=0; ; ++i) + for (var i = 0; ; ++i) { var err = Librdkafka.header_get_all(hdrsPtr, (IntPtr)i, out IntPtr namep, out IntPtr valuep, out IntPtr sizep); if (err != ErrorCode.NoError) @@ -263,8 +263,8 @@ private void DeliveryReportCallbackImpl(IntPtr rk, IntPtr rkmessage, IntPtr opaq { // Topic is not set here in order to avoid the marshalling cost. // Instead, the delivery handler is expected to cache the topic string. - Partition = msg.partition, - Offset = msg.offset, + Partition = msg.partition, + Offset = msg.offset, Error = KafkaHandle.CreatePossiblyFatalError(msg.err, null), Status = messageStatus, Message = new Message { Timestamp = new Timestamp(timestamp, (TimestampType)timestampType), Headers = headers } @@ -399,9 +399,9 @@ public void Flush(CancellationToken cancellationToken) throw new OperationCanceledException(); } } - } - - + } + + /// public void Dispose() { @@ -423,7 +423,7 @@ protected virtual void Dispose(bool disposing) { // Calling Dispose a second or subsequent time should be a no-op. lock (disposeHasBeenCalledLockObj) - { + { if (disposeHasBeenCalled) { return; } disposeHasBeenCalled = true; } @@ -486,7 +486,7 @@ public void SetSaslCredentials(string username, string password) /// - public Handle Handle + public Handle Handle { get { @@ -639,7 +639,8 @@ internal Producer(ProducerBuilder builder) case "timestamp": this.enableDeliveryReportTimestamp = true; break; case "headers": this.enableDeliveryReportHeaders = true; break; case "status": this.enableDeliveryReportPersistedStatus = true; break; - default: throw new ArgumentException( + default: + throw new ArgumentException( $"Unknown delivery report field name '{part}' in config value '{ConfigPropertyNames.Producer.DeliveryReportFields}'."); } } @@ -815,10 +816,10 @@ public async Task> ProduceAsync( else { ProduceImpl( - topicPartition.Topic, - valBytes, 0, valBytes == null ? 0 : valBytes.Length, - keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length, - message.Timestamp, topicPartition.Partition, headers.BackingList, + topicPartition.Topic, + valBytes, 0, valBytes == null ? 0 : valBytes.Length, + keyBytes, 0, keyBytes == null ? 
0 : keyBytes.Length, + message.Timestamp, topicPartition.Partition, headers.BackingList, null); var result = new DeliveryResult @@ -931,11 +932,11 @@ public void Produce( { throw new ProduceException( ex.Error, - new DeliveryReport - { - Message = message, - TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset) - }); + new DeliveryReport + { + Message = message, + TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset) + }); } } @@ -1023,12 +1024,12 @@ public void HandleDeliveryReport(DeliveryReport deliveryReport) { TopicPartitionOffsetError = deliveryReport.TopicPartitionOffsetError, Status = deliveryReport.Status, - Message = new Message + Message = new Message { Key = Key, Value = Value, - Timestamp = deliveryReport.Message == null - ? new Timestamp(0, TimestampType.NotAvailable) + Timestamp = deliveryReport.Message == null + ? new Timestamp(0, TimestampType.NotAvailable) : deliveryReport.Message.Timestamp, Headers = deliveryReport.Message?.Headers } @@ -1054,8 +1055,8 @@ public void BeginTransaction() /// public void CommitTransaction(TimeSpan timeout) - => KafkaHandle.CommitTransaction(timeout.TotalMillisecondsAsInt()); - + => KafkaHandle.CommitTransaction(timeout.TotalMillisecondsAsInt()); + /// public void CommitTransaction() => KafkaHandle.CommitTransaction(-1); diff --git a/src/Confluent.Kafka/ProducerBuilder.cs b/src/Confluent.Kafka/ProducerBuilder.cs index e2a3b488b..e2eb92382 100644 --- a/src/Confluent.Kafka/ProducerBuilder.cs +++ b/src/Confluent.Kafka/ProducerBuilder.cs @@ -114,7 +114,7 @@ public class ProducerBuilder /// internal protected IAsyncSerializer AsyncValueSerializer { get; set; } - internal Producer.Config ConstructBaseConfig(Producer producer) + internal Producer.Config ConstructBaseConfig(Producer producer) { return new Producer.Config { diff --git a/src/Confluent.Kafka/SerializationContext.cs b/src/Confluent.Kafka/SerializationContext.cs index a8b20f857..19bbbefc1 100644 --- a/src/Confluent.Kafka/SerializationContext.cs +++ b/src/Confluent.Kafka/SerializationContext.cs @@ -53,7 +53,7 @@ public SerializationContext(MessageComponentType component, string topic, Header /// The topic the data is being written to or read from. /// public string Topic { get; private set; } - + /// /// The component of the message the serialization operation relates to. /// diff --git a/src/Confluent.Kafka/Serializers.cs b/src/Confluent.Kafka/Serializers.cs index b9e9ac2eb..47592de9b 100644 --- a/src/Confluent.Kafka/Serializers.cs +++ b/src/Confluent.Kafka/Serializers.cs @@ -29,7 +29,7 @@ public static class Serializers /// String (UTF8) serializer. /// public static ISerializer Utf8 = new Utf8Serializer(); - + private class Utf8Serializer : ISerializer { public byte[] Serialize(string data, SerializationContext context) @@ -173,7 +173,7 @@ public byte[] Serialize(double data, SerializationContext context) /// Byte order is original order. /// public static ISerializer ByteArray = new ByteArraySerializer(); - + private class ByteArraySerializer : ISerializer { public byte[] Serialize(byte[] data, SerializationContext context) diff --git a/src/Confluent.Kafka/SysLogLevel.cs b/src/Confluent.Kafka/SysLogLevel.cs index 19e97a738..e57e2809b 100644 --- a/src/Confluent.Kafka/SysLogLevel.cs +++ b/src/Confluent.Kafka/SysLogLevel.cs @@ -51,12 +51,12 @@ public enum SyslogLevel /// Normal, but significant condition. /// Notice = 5, - + /// /// Informational message. /// Info = 6, - + /// /// Debug-level message. 
/// diff --git a/src/Confluent.Kafka/Timestamp.cs b/src/Confluent.Kafka/Timestamp.cs index 64b50ce6e..23bbd73d6 100644 --- a/src/Confluent.Kafka/Timestamp.cs +++ b/src/Confluent.Kafka/Timestamp.cs @@ -41,10 +41,10 @@ public static Timestamp Default /// the number of seconds past this UTC time, excluding /// leap seconds. /// - public static readonly DateTime UnixTimeEpoch + public static readonly DateTime UnixTimeEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc); - private const long UnixTimeEpochMilliseconds + private const long UnixTimeEpochMilliseconds = 62135596800000; // = UnixTimeEpoch.TotalMiliseconds @@ -90,8 +90,8 @@ public Timestamp(DateTime dateTime, TimestampType type) /// The DateTime value corresponding to the timestamp. /// public Timestamp(DateTime dateTime) - : this(dateTime, TimestampType.CreateTime) - {} + : this(dateTime, TimestampType.CreateTime) + { } /// /// Initializes a new instance of the Timestamp structure. @@ -101,8 +101,8 @@ public Timestamp(DateTime dateTime) /// The DateTimeOffset value corresponding to the timestamp. /// public Timestamp(DateTimeOffset dateTimeOffset) - : this(dateTimeOffset.UtcDateTime, TimestampType.CreateTime) - {} + : this(dateTimeOffset.UtcDateTime, TimestampType.CreateTime) + { } /// /// Gets the timestamp type. diff --git a/src/Confluent.Kafka/TopicCollection.cs b/src/Confluent.Kafka/TopicCollection.cs index 35b143f3b..7521b5b73 100644 --- a/src/Confluent.Kafka/TopicCollection.cs +++ b/src/Confluent.Kafka/TopicCollection.cs @@ -32,7 +32,7 @@ public class TopicCollection private TopicCollection() { } - + /// /// Topic names. /// diff --git a/src/Confluent.Kafka/TopicPartition.cs b/src/Confluent.Kafka/TopicPartition.cs index cdf0c16f6..739316d5c 100644 --- a/src/Confluent.Kafka/TopicPartition.cs +++ b/src/Confluent.Kafka/TopicPartition.cs @@ -78,7 +78,7 @@ public override bool Equals(object obj) /// public override int GetHashCode() // x by prime number is quick and gives decent distribution. - => Partition.GetHashCode()*251 + Topic.GetHashCode(); + => Partition.GetHashCode() * 251 + Topic.GetHashCode(); /// /// Tests whether TopicPartition instance a is equal to TopicPartition instance b. diff --git a/src/Confluent.Kafka/TopicPartitionError.cs b/src/Confluent.Kafka/TopicPartitionError.cs index 9b90db008..d02b004d5 100644 --- a/src/Confluent.Kafka/TopicPartitionError.cs +++ b/src/Confluent.Kafka/TopicPartitionError.cs @@ -33,7 +33,7 @@ public class TopicPartitionError /// A Kafka error. /// public TopicPartitionError(TopicPartition tp, Error error) - : this(tp.Topic, tp.Partition, error) {} + : this(tp.Topic, tp.Partition, error) { } /// @@ -104,7 +104,7 @@ public override bool Equals(object obj) /// public override int GetHashCode() // x by prime number is quick and gives decent distribution. - => (Partition.GetHashCode()*251 + Topic.GetHashCode())*251 + Error.GetHashCode(); + => (Partition.GetHashCode() * 251 + Topic.GetHashCode()) * 251 + Error.GetHashCode(); /// /// Tests whether TopicPartitionError instance a is equal to TopicPartitionError instance b. diff --git a/src/Confluent.Kafka/TopicPartitionInfo.cs b/src/Confluent.Kafka/TopicPartitionInfo.cs index bf9ffe69b..0900d83e1 100644 --- a/src/Confluent.Kafka/TopicPartitionInfo.cs +++ b/src/Confluent.Kafka/TopicPartitionInfo.cs @@ -65,7 +65,7 @@ public override string ToString() ISR.Select(isr => isr?.ToString() ?? 
"null") ); - + result.Append($"{{\"Partition\": {Partition}"); result.Append($", \"Leader\": {leader}, \"Replicas\": [{replicas}]"); result.Append($", \"ISR\": [{isrs}]}}"); diff --git a/src/Confluent.Kafka/TopicPartitionOffset.cs b/src/Confluent.Kafka/TopicPartitionOffset.cs index 425b0cb65..75faf3adb 100644 --- a/src/Confluent.Kafka/TopicPartitionOffset.cs +++ b/src/Confluent.Kafka/TopicPartitionOffset.cs @@ -34,7 +34,7 @@ public class TopicPartitionOffset /// public TopicPartitionOffset(TopicPartition tp, Offset offset) : this(tp.Topic, tp.Partition, offset, null) { } - + /// /// Initializes a new TopicPartitionOffset instance. /// @@ -105,7 +105,7 @@ public TopicPartitionOffset(string topic, Partition partition, /// Gets the Kafka partition offset value. /// public Offset Offset { get; } - + /// /// Gets the offset leader epoch (optional). /// @@ -143,9 +143,9 @@ public override bool Equals(object obj) /// /// An integer that specifies a hash value for this TopicPartitionOffset. /// - public override int GetHashCode() + public override int GetHashCode() // x by prime number is quick and gives decent distribution. - => (Partition.GetHashCode()*251 + Topic.GetHashCode())*251 + Offset.GetHashCode(); + => (Partition.GetHashCode() * 251 + Topic.GetHashCode()) * 251 + Offset.GetHashCode(); /// /// Tests whether TopicPartitionOffset instance a is equal to TopicPartitionOffset instance b. @@ -165,7 +165,7 @@ public override int GetHashCode() { return (b is null); } - + return a.Equals(b); } diff --git a/src/Confluent.Kafka/TopicPartitionOffsetError.cs b/src/Confluent.Kafka/TopicPartitionOffsetError.cs index ee4521a91..523723ccb 100644 --- a/src/Confluent.Kafka/TopicPartitionOffsetError.cs +++ b/src/Confluent.Kafka/TopicPartitionOffsetError.cs @@ -41,7 +41,7 @@ public class TopicPartitionOffsetError public TopicPartitionOffsetError(TopicPartition tp, Offset offset, Error error, int? leaderEpoch = null) - : this(tp.Topic, tp.Partition, offset, error, leaderEpoch) {} + : this(tp.Topic, tp.Partition, offset, error, leaderEpoch) { } /// /// Initializes a new TopicPartitionOffsetError instance. @@ -54,7 +54,8 @@ public TopicPartitionOffsetError(TopicPartition tp, Offset offset, /// public TopicPartitionOffsetError(TopicPartitionOffset tpo, Error error) : this(tpo.Topic, tpo.Partition, tpo.Offset, error, - tpo.LeaderEpoch) {} + tpo.LeaderEpoch) + { } /// /// Initializes a new TopicPartitionOffsetError instance. @@ -98,7 +99,7 @@ public TopicPartitionOffsetError(string topic, Partition partition, Offset offse /// Gets the Kafka partition offset value. /// public Offset Offset { get; } - + /// /// Gets the offset leader epoch (optional). /// @@ -149,7 +150,7 @@ public override bool Equals(object obj) /// public override int GetHashCode() // x by prime number is quick and gives decent distribution. - => ((Partition.GetHashCode()*251 + Topic.GetHashCode())*251 + Offset.GetHashCode())*251 + Error.GetHashCode(); + => ((Partition.GetHashCode() * 251 + Topic.GetHashCode()) * 251 + Offset.GetHashCode()) * 251 + Error.GetHashCode(); /// /// Tests whether TopicPartitionOffsetError instance a is equal to TopicPartitionOffsetError instance b. 
@@ -204,7 +205,7 @@ public static explicit operator TopicPartitionOffset(TopicPartitionOffsetError t { throw new KafkaException(tpoe.Error); } - + return tpoe.TopicPartitionOffset; } diff --git a/src/Confluent.Kafka/TopicPartitionTimestamp.cs b/src/Confluent.Kafka/TopicPartitionTimestamp.cs index 8e962e7e6..6fb273e96 100644 --- a/src/Confluent.Kafka/TopicPartitionTimestamp.cs +++ b/src/Confluent.Kafka/TopicPartitionTimestamp.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc., 2015-2016 Andreas Heider +// Copyright 2016-2017 Confluent Inc., 2015-2016 Andreas Heider // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ public class TopicPartitionTimestamp /// A Kafka timestamp value. /// public TopicPartitionTimestamp(TopicPartition tp, Timestamp timestamp) - : this (tp.Topic, tp.Partition, timestamp) + : this(tp.Topic, tp.Partition, timestamp) { } diff --git a/src/Confluent.SchemaRegistry.Encryption.Aws/AwsKmsClient.cs b/src/Confluent.SchemaRegistry.Encryption.Aws/AwsKmsClient.cs index 0b4a0ff94..e0bc9c10a 100644 --- a/src/Confluent.SchemaRegistry.Encryption.Aws/AwsKmsClient.cs +++ b/src/Confluent.SchemaRegistry.Encryption.Aws/AwsKmsClient.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using System.Threading.Tasks; using Amazon; @@ -12,19 +12,21 @@ public class AwsKmsClient : IKmsClient { private AmazonKeyManagementServiceClient kmsClient; private string keyId; - + public string KekId { get; } - + public AwsKmsClient(string kekId, AWSCredentials credentials) { KekId = kekId; - - if (!kekId.StartsWith(AwsKmsDriver.Prefix)) { - throw new ArgumentException(string.Format($"key URI must start with {AwsKmsDriver.Prefix}")); + + if (!kekId.StartsWith(AwsKmsDriver.Prefix)) + { + throw new ArgumentException(string.Format($"key URI must start with {AwsKmsDriver.Prefix}")); } keyId = KekId.Substring(AwsKmsDriver.Prefix.Length); string[] tokens = keyId.Split(':'); - if (tokens.Length < 4) { + if (tokens.Length < 4) + { throw new ArgumentException("invalid key URI"); } string regionName = tokens[3]; @@ -36,9 +38,9 @@ public AwsKmsClient(string kekId, AWSCredentials credentials) public bool DoesSupport(string uri) { - return uri.StartsWith(AwsKmsDriver.Prefix); + return uri.StartsWith(AwsKmsDriver.Prefix); } - + public async Task Encrypt(byte[] plaintext) { using var dataStream = new MemoryStream(plaintext); @@ -63,4 +65,4 @@ public async Task Decrypt(byte[] ciphertext) return response.Plaintext.ToArray(); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption.Aws/AwsKmsDriver.cs b/src/Confluent.SchemaRegistry.Encryption.Aws/AwsKmsDriver.cs index 8f605e13d..2a1fb55f0 100644 --- a/src/Confluent.SchemaRegistry.Encryption.Aws/AwsKmsDriver.cs +++ b/src/Confluent.SchemaRegistry.Encryption.Aws/AwsKmsDriver.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using Amazon.Runtime; namespace Confluent.SchemaRegistry.Encryption.Aws @@ -13,7 +13,7 @@ public static void Register() public static readonly string Prefix = "aws-kms://"; public static readonly string AccessKeyId = "access.key.id"; public static readonly string SecretAccessKey = "secret.access.key"; - + public string GetKeyUrlPrefix() { return Prefix; @@ -22,7 +22,7 @@ public string GetKeyUrlPrefix() public IKmsClient NewKmsClient(IDictionary config, string keyUrl) { AWSCredentials credentials = null; - if (config.TryGetValue(AccessKeyId, out string 
accessKeyId) + if (config.TryGetValue(AccessKeyId, out string accessKeyId) && config.TryGetValue(SecretAccessKey, out string secretAccessKey)) { credentials = new BasicAWSCredentials(accessKeyId, secretAccessKey); @@ -30,4 +30,4 @@ public IKmsClient NewKmsClient(IDictionary config, string keyUrl return new AwsKmsClient(keyUrl, credentials); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption.Azure/AzureKmsClient.cs b/src/Confluent.SchemaRegistry.Encryption.Azure/AzureKmsClient.cs index 2ef3f1cd7..83b995b56 100644 --- a/src/Confluent.SchemaRegistry.Encryption.Azure/AzureKmsClient.cs +++ b/src/Confluent.SchemaRegistry.Encryption.Azure/AzureKmsClient.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Threading.Tasks; using Azure.Core; using Azure.Security.KeyVault.Keys.Cryptography; @@ -10,24 +10,25 @@ public class AzureKmsClient : IKmsClient private CryptographyClient kmsClient; private TokenCredential credentials; private string keyId; - + public string KekId { get; } - + public AzureKmsClient(string kekId, TokenCredential tokenCredential) { KekId = kekId; - if (!kekId.StartsWith(AzureKmsDriver.Prefix)) { - throw new ArgumentException(string.Format($"key URI must start with {AzureKmsDriver.Prefix}")); + if (!kekId.StartsWith(AzureKmsDriver.Prefix)) + { + throw new ArgumentException(string.Format($"key URI must start with {AzureKmsDriver.Prefix}")); } keyId = KekId.Substring(AzureKmsDriver.Prefix.Length); credentials = tokenCredential; } - + public bool DoesSupport(string uri) { - return uri.StartsWith(AzureKmsDriver.Prefix); + return uri.StartsWith(AzureKmsDriver.Prefix); } - + public async Task Encrypt(byte[] plaintext) { var client = GetCryptographyClient(); @@ -41,7 +42,7 @@ public async Task Decrypt(byte[] ciphertext) var result = await client.DecryptAsync(EncryptionAlgorithm.RsaOaep256, ciphertext).ConfigureAwait(false); return result.Plaintext; } - + private CryptographyClient GetCryptographyClient() { if (kmsClient == null) diff --git a/src/Confluent.SchemaRegistry.Encryption.Azure/AzureKmsDriver.cs b/src/Confluent.SchemaRegistry.Encryption.Azure/AzureKmsDriver.cs index d40277d41..dd23e472c 100644 --- a/src/Confluent.SchemaRegistry.Encryption.Azure/AzureKmsDriver.cs +++ b/src/Confluent.SchemaRegistry.Encryption.Azure/AzureKmsDriver.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using Azure.Core; using Azure.Identity; @@ -10,12 +10,12 @@ public static void Register() { KmsRegistry.RegisterKmsDriver(new AzureKmsDriver()); } - + public static readonly string Prefix = "azure-kms://"; public static readonly string TenantId = "tenant.id"; public static readonly string ClientId = "client.id"; public static readonly string ClientSecret = "client.secret"; - + public string GetKeyUrlPrefix() { return Prefix; @@ -24,7 +24,7 @@ public string GetKeyUrlPrefix() public IKmsClient NewKmsClient(IDictionary config, string keyUrl) { TokenCredential credential; - if (config.TryGetValue(TenantId, out string tenantId) + if (config.TryGetValue(TenantId, out string tenantId) && config.TryGetValue(ClientId, out string clientId) && config.TryGetValue(ClientSecret, out string clientSecret)) { @@ -37,4 +37,4 @@ public IKmsClient NewKmsClient(IDictionary config, string keyUrl return new AzureKmsClient(keyUrl, credential); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption.Gcp/GcpKmsClient.cs b/src/Confluent.SchemaRegistry.Encryption.Gcp/GcpKmsClient.cs index db5fd4683..ebedc58d6 
100644 --- a/src/Confluent.SchemaRegistry.Encryption.Gcp/GcpKmsClient.cs +++ b/src/Confluent.SchemaRegistry.Encryption.Gcp/GcpKmsClient.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Threading.Tasks; using Google.Apis.Auth.OAuth2; using Google.Cloud.Kms.V1; @@ -11,7 +11,7 @@ public class GcpKmsClient : IKmsClient private KeyManagementServiceClient kmsClient; private string keyId; private CryptoKeyName keyName; - + public string KekId { get; } public GcpKmsClient(string kekId, GoogleCredential credential) @@ -27,9 +27,9 @@ public GcpKmsClient(string kekId, GoogleCredential credential) keyName = CryptoKeyName.Parse(keyId); kmsClient = credential != null ? new KeyManagementServiceClientBuilder() - { - GoogleCredential = credential - } + { + GoogleCredential = credential + } .Build() : KeyManagementServiceClient.Create(); } @@ -38,7 +38,7 @@ public bool DoesSupport(string uri) { return uri.StartsWith(GcpKmsDriver.Prefix); } - + public async Task Encrypt(byte[] plaintext) { var result = await kmsClient.EncryptAsync(keyName, ByteString.CopyFrom(plaintext)) @@ -53,4 +53,4 @@ public async Task Decrypt(byte[] ciphertext) return result.Plaintext.ToByteArray(); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption.Gcp/GcpKmsDriver.cs b/src/Confluent.SchemaRegistry.Encryption.Gcp/GcpKmsDriver.cs index cdc2d688b..27af5cf53 100644 --- a/src/Confluent.SchemaRegistry.Encryption.Gcp/GcpKmsDriver.cs +++ b/src/Confluent.SchemaRegistry.Encryption.Gcp/GcpKmsDriver.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using Google.Apis.Auth.OAuth2; @@ -10,14 +10,14 @@ public static void Register() { KmsRegistry.RegisterKmsDriver(new GcpKmsDriver()); } - + public static readonly string Prefix = "gcp-kms://"; public static readonly string AccountType = "account.type"; public static readonly string ClientId = "client.id"; public static readonly string ClientEmail = "client.email"; public static readonly string PrivateKeyId = "private.key.id"; public static readonly string PrivateKey = "private.key"; - + public string GetKeyUrlPrefix() { return Prefix; @@ -46,4 +46,4 @@ public IKmsClient NewKmsClient(IDictionary config, string keyUrl return new GcpKmsClient(keyUrl, credentials); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption.HcVault/HcVaultKmsClient.cs b/src/Confluent.SchemaRegistry.Encryption.HcVault/HcVaultKmsClient.cs index 303b739f8..491296bec 100644 --- a/src/Confluent.SchemaRegistry.Encryption.HcVault/HcVaultKmsClient.cs +++ b/src/Confluent.SchemaRegistry.Encryption.HcVault/HcVaultKmsClient.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Text; using System.Threading.Tasks; using VaultSharp; @@ -14,11 +14,11 @@ public class HcVaultKmsClient : IKmsClient private IVaultClient kmsClient; private string keyId; private string keyName; - + public string KekId { get; } public string Namespace { get; } public string TokenId { get; } - + public HcVaultKmsClient(string kekId, string ns, string tokenId) { if (tokenId == null) @@ -29,15 +29,16 @@ public HcVaultKmsClient(string kekId, string ns, string tokenId) KekId = kekId; Namespace = ns; TokenId = tokenId; - - if (!kekId.StartsWith(HcVaultKmsDriver.Prefix)) { - throw new ArgumentException(string.Format($"key URI must start with {HcVaultKmsDriver.Prefix}")); + + if (!kekId.StartsWith(HcVaultKmsDriver.Prefix)) + { + throw new ArgumentException(string.Format($"key URI must start with {HcVaultKmsDriver.Prefix}")); } keyId = 
KekId.Substring(HcVaultKmsDriver.Prefix.Length); IAuthMethodInfo authMethod = new TokenAuthMethodInfo(tokenId); Uri uri = new Uri(keyId); keyName = uri.Segments[^1]; - + var vaultClientSettings = new VaultClientSettings(uri.Scheme + "://" + uri.Authority, authMethod); if (ns != null) { @@ -45,12 +46,12 @@ public HcVaultKmsClient(string kekId, string ns, string tokenId) } kmsClient = new VaultClient(vaultClientSettings); } - + public bool DoesSupport(string uri) { return uri.StartsWith(HcVaultKmsDriver.Prefix); } - + public async Task Encrypt(byte[] plaintext) { var encodedPlaintext = Convert.ToBase64String(plaintext); @@ -77,4 +78,4 @@ public async Task Decrypt(byte[] ciphertext) return Convert.FromBase64String(decryptionResponse.Data.Base64EncodedPlainText); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption.HcVault/HcVaultKmsDriver.cs b/src/Confluent.SchemaRegistry.Encryption.HcVault/HcVaultKmsDriver.cs index e220afc7d..d4fb84b6c 100644 --- a/src/Confluent.SchemaRegistry.Encryption.HcVault/HcVaultKmsDriver.cs +++ b/src/Confluent.SchemaRegistry.Encryption.HcVault/HcVaultKmsDriver.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; namespace Confluent.SchemaRegistry.Encryption.HcVault @@ -9,11 +9,11 @@ public static void Register() { KmsRegistry.RegisterKmsDriver(new HcVaultKmsDriver()); } - + public static readonly string Prefix = "hcvault://"; public static readonly string TokenId = "token.id"; public static readonly string Namespace = "namespace"; - + public string GetKeyUrlPrefix() { return Prefix; @@ -26,4 +26,4 @@ public IKmsClient NewKmsClient(IDictionary config, string keyUrl return new HcVaultKmsClient(keyUrl, ns, tokenId); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/CachedDekRegistryClient.cs b/src/Confluent.SchemaRegistry.Encryption/CachedDekRegistryClient.cs index cbd631b5f..4f49a4a44 100644 --- a/src/Confluent.SchemaRegistry.Encryption/CachedDekRegistryClient.cs +++ b/src/Confluent.SchemaRegistry.Encryption/CachedDekRegistryClient.cs @@ -1,4 +1,4 @@ -// Copyright 2024 Confluent Inc. +// Copyright 2024 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -25,24 +25,24 @@ namespace Confluent.SchemaRegistry.Encryption { public record KekId(string Name, bool LookupDeletedKeks); - + public record DekId(string KekName, string Subject, int? Version, DekFormat? DekFormat, bool LookupDeletedDeks); - + /// /// A caching DEK Registry client. /// public class CachedDekRegistryClient : IDekRegistryClient, IDisposable { private DekRestService restService; - + private int identityMapCapacity; - + private readonly IDictionary keks = new Dictionary(); private readonly IDictionary deks = new Dictionary(); private readonly SemaphoreSlim cacheMutex = new SemaphoreSlim(1); - + /// /// The default timeout value for Schema Registry REST API calls. /// @@ -343,7 +343,7 @@ public Task> GetDeksAsync(string kekName, bool ignoreDeletedDeks) /// public Task> GetDekVersionsAsync(string kekName, string subject, DekFormat? 
algorithm, - bool ignoreDeletedDeks) + bool ignoreDeletedDeks) => restService.GetDekVersionsAsync(kekName, subject, algorithm, ignoreDeletedDeks); /// @@ -413,7 +413,7 @@ public Task CreateDekAsync(string kekName, Dek dek) this.deks.Remove(new DekId(kekName, dek.Subject, -1, dek.Algorithm, true)); } } - + /// /// Releases unmanaged resources owned by this CachedSchemaRegistryClient instance. /// @@ -439,4 +439,4 @@ protected virtual void Dispose(bool disposing) } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/CompilerServices.cs b/src/Confluent.SchemaRegistry.Encryption/CompilerServices.cs index 10cd52d6e..c4a801a39 100644 --- a/src/Confluent.SchemaRegistry.Encryption/CompilerServices.cs +++ b/src/Confluent.SchemaRegistry.Encryption/CompilerServices.cs @@ -1,4 +1,4 @@ -// Copyright 2024 Confluent Inc. +// Copyright 2024 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,4 +20,4 @@ namespace System.Runtime.CompilerServices { [EditorBrowsable(EditorBrowsableState.Never)] internal static class IsExternalInit { } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/Cryptor.cs b/src/Confluent.SchemaRegistry.Encryption/Cryptor.cs index 74e49e2ed..be3d7eff5 100644 --- a/src/Confluent.SchemaRegistry.Encryption/Cryptor.cs +++ b/src/Confluent.SchemaRegistry.Encryption/Cryptor.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using System.Security.Cryptography; using Google.Crypto.Tink; @@ -10,7 +10,7 @@ namespace Confluent.SchemaRegistry.Encryption public class Cryptor { private static byte[] EmptyAAD = new byte[] { }; - + public Cryptor(DekFormat dekFormat) { DekFormat = dekFormat; @@ -37,7 +37,7 @@ public int KeySize() default: throw new ArgumentException(); } - + } public byte[] GenerateKey() @@ -166,4 +166,4 @@ static byte[] DecryptWithAesGcm(byte[] key, byte[] payload) } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/FieldEncryptionExecutor.cs b/src/Confluent.SchemaRegistry.Encryption/FieldEncryptionExecutor.cs index 3ec805e56..990638516 100644 --- a/src/Confluent.SchemaRegistry.Encryption/FieldEncryptionExecutor.cs +++ b/src/Confluent.SchemaRegistry.Encryption/FieldEncryptionExecutor.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.IO; using System.Linq; @@ -16,13 +16,13 @@ public static void Register() } public static readonly string RuleType = "ENCRYPT"; - + public static readonly string EncryptKekName = "encrypt.kek.name"; public static readonly string EncryptKmsKeyid = "encrypt.kms.key.id"; public static readonly string EncryptKmsType = "encrypt.kms.type"; public static readonly string EncryptDekAlgorithm = "encrypt.dek.algorithm"; public static readonly string EncryptDekExpiryDays = "encrypt.dek.expiry.days"; - + public static readonly string KmsTypeSuffix = "://"; internal static readonly int LatestVersion = -1; @@ -38,13 +38,13 @@ public FieldEncryptionExecutor() { Clock = new Clock(); } - + public FieldEncryptionExecutor(IDekRegistryClient client, IClock clock) { Client = client; Clock = clock ?? 
new Clock(); } - + public override void Configure(IEnumerable> config) { Configs = config; @@ -98,7 +98,7 @@ internal static object ToObject(RuleContext.Type type, byte[] bytes) return null; } } - + public override void Dispose() { if (Client != null) @@ -121,7 +121,7 @@ public FieldEncryptionExecutorTransform(FieldEncryptionExecutor executor) { this.executor = executor; } - + public void Init(RuleContext ctx) { cryptor = executor.GetCryptor(ctx); @@ -151,7 +151,7 @@ private async Task GetKek(RuleContext ctx) return registeredKek; } - + private async Task GetOrCreateKek(RuleContext ctx) { bool isRead = ctx.RuleMode == RuleMode.Read; @@ -201,7 +201,7 @@ private async Task GetOrCreateKek(RuleContext ctx) return kek; } - + private int GetDekExpiryDays(RuleContext ctx) { string expiryDays = ctx.GetParameter(FieldEncryptionExecutor.EncryptDekExpiryDays); @@ -219,7 +219,7 @@ private int GetDekExpiryDays(RuleContext ctx) } return days; } - + private async Task RetrieveKekFromRegistry(KekId key) { try @@ -237,7 +237,7 @@ private async Task RetrieveKekFromRegistry(KekId key) throw new RuleException($"Failed to retrieve kek {key.Name}", e); } } - + private async Task StoreKekToRegistry(KekId key, string kmsType, string kmsKeyId, bool shared) { Kek kek = new Kek @@ -262,7 +262,7 @@ private async Task StoreKekToRegistry(KekId key, string kmsType, throw new RuleException($"Failed to create kek {key.Name}", e); } } - + private async Task GetOrCreateDek(RuleContext ctx, int? version) { RegisteredKek kek = await GetKek(ctx).ConfigureAwait(continueOnCapturedContext: false); @@ -326,9 +326,9 @@ private bool IsExpired(RuleContext ctx, RegisteredDek dek) return ctx.RuleMode != RuleMode.Read && dekExpiryDays > 0 && dek != null - && ((double) (now - dek.Timestamp)) / FieldEncryptionExecutor.MillisInDay > dekExpiryDays; + && ((double)(now - dek.Timestamp)) / FieldEncryptionExecutor.MillisInDay > dekExpiryDays; } - + private async Task RetrieveDekFromRegistry(DekId key) { try @@ -360,7 +360,7 @@ private async Task RetrieveDekFromRegistry(DekId key) throw new RuleException($"Failed to retrieve dek for kek {key.KekName}, subject {key.Subject}", e); } } - + private async Task StoreDekToRegistry(DekId key, byte[] encryptedDek) { @@ -486,7 +486,7 @@ private byte[] PrefixVersion(int version, byte[] ciphertext) } } } - + private static IKmsClient GetKmsClient(IEnumerable> configs, RegisteredKek kek) { string keyUrl = kek.KmsType + FieldEncryptionExecutor.KmsTypeSuffix + kek.KmsKeyId; @@ -516,4 +516,4 @@ internal class Clock : IClock { public long NowToUnixTimeMilliseconds() => DateTimeOffset.Now.ToUnixTimeMilliseconds(); } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/IDekRegistryClient.cs b/src/Confluent.SchemaRegistry.Encryption/IDekRegistryClient.cs index 2a26e93e0..415f64199 100644 --- a/src/Confluent.SchemaRegistry.Encryption/IDekRegistryClient.cs +++ b/src/Confluent.SchemaRegistry.Encryption/IDekRegistryClient.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2018 Confluent Inc. +// Copyright 2016-2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
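Just above, FieldEncryptionExecutor's IsExpired compares a DEK's age against encrypt.dek.expiry.days: the clock's current Unix-millisecond time minus the DEK timestamp, divided by the milliseconds in a day, must not exceed the configured day count (and reads never expire a DEK). A minimal sketch of that arithmetic, assuming MillisInDay is the usual 86,400,000 since its value is not shown in this patch, and using the same DateTimeOffset call the Clock wrapper makes:

using System;

static class DekExpiryExample
{
    // Assumed value; FieldEncryptionExecutor.MillisInDay is referenced but not shown here.
    const long MillisInDay = 24L * 60 * 60 * 1000;

    // Mirrors the IsExpired arithmetic (minus the RuleMode.Read and null-dek guards).
    static bool IsExpired(long nowMillis, long dekTimestampMillis, int dekExpiryDays)
        => dekExpiryDays > 0
           && ((double)(nowMillis - dekTimestampMillis)) / MillisInDay > dekExpiryDays;

    static void Main()
    {
        long now = DateTimeOffset.Now.ToUnixTimeMilliseconds(); // same call Clock.NowToUnixTimeMilliseconds makes
        long createdThreeDaysAgo = now - 3 * MillisInDay;

        Console.WriteLine(IsExpired(now, createdThreeDaysAgo, 2)); // True: older than the 2-day limit
        Console.WriteLine(IsExpired(now, createdThreeDaysAgo, 0)); // False: expiry disabled
    }
}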
@@ -122,4 +122,4 @@ public Task GetDekLatestVersionAsync(string kekName, string subje /// public Task CreateDekAsync(string kekName, Dek dek); } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/IKmsClient.cs b/src/Confluent.SchemaRegistry.Encryption/IKmsClient.cs index a566aa021..978f257bb 100644 --- a/src/Confluent.SchemaRegistry.Encryption/IKmsClient.cs +++ b/src/Confluent.SchemaRegistry.Encryption/IKmsClient.cs @@ -1,13 +1,13 @@ -using System.Threading.Tasks; +using System.Threading.Tasks; namespace Confluent.SchemaRegistry.Encryption { public interface IKmsClient { bool DoesSupport(string uri); - + Task Encrypt(byte[] plaintext); Task Decrypt(byte[] ciphertext); } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/IKmsDriver.cs b/src/Confluent.SchemaRegistry.Encryption/IKmsDriver.cs index f250987cc..61d6f6b0b 100644 --- a/src/Confluent.SchemaRegistry.Encryption/IKmsDriver.cs +++ b/src/Confluent.SchemaRegistry.Encryption/IKmsDriver.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; namespace Confluent.SchemaRegistry.Encryption { @@ -8,4 +8,4 @@ public interface IKmsDriver IKmsClient NewKmsClient(IDictionary config, string keyUrl); } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/KmsClients.cs b/src/Confluent.SchemaRegistry.Encryption/KmsClients.cs index d3c0b159c..c8fdd18f7 100644 --- a/src/Confluent.SchemaRegistry.Encryption/KmsClients.cs +++ b/src/Confluent.SchemaRegistry.Encryption/KmsClients.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; @@ -10,7 +10,7 @@ namespace Confluent.SchemaRegistry.Encryption public static class KmsClients { private static IDictionary clients = new ConcurrentDictionary(); - + public static IKmsClient Get(string id) { return clients[id]; @@ -21,4 +21,4 @@ public static void Add(string id, IKmsClient kmsClient) clients[id] = kmsClient; } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/KmsRegistry.cs b/src/Confluent.SchemaRegistry.Encryption/KmsRegistry.cs index 5bbf905ab..0b85f3fab 100644 --- a/src/Confluent.SchemaRegistry.Encryption/KmsRegistry.cs +++ b/src/Confluent.SchemaRegistry.Encryption/KmsRegistry.cs @@ -30,7 +30,7 @@ public static class KmsRegistry private static IList kmsDrivers = new List(); private static IList kmsClients = new List(); - + public static void RegisterKmsDriver(IKmsDriver kmsDriver) { kmsDriversMutex.Wait(); @@ -43,7 +43,7 @@ public static void RegisterKmsDriver(IKmsDriver kmsDriver) kmsDriversMutex.Release(); } } - + public static IKmsDriver GetKmsDriver(string keyUrl) { kmsDriversMutex.Wait(); @@ -64,7 +64,7 @@ public static IKmsDriver GetKmsDriver(string keyUrl) throw new ArgumentException("No KMS driver found for key URL: " + keyUrl); ; } - + public static void RegisterKmsClient(IKmsClient kmsClient) { kmsClientsMutex.Wait(); @@ -77,7 +77,7 @@ public static void RegisterKmsClient(IKmsClient kmsClient) kmsClientsMutex.Release(); } } - + public static IKmsClient GetKmsClient(string keyUrl) { kmsClientsMutex.Wait(); diff --git a/src/Confluent.SchemaRegistry.Encryption/LocalKmsClient.cs b/src/Confluent.SchemaRegistry.Encryption/LocalKmsClient.cs index 551d29e06..cad01d150 100644 --- a/src/Confluent.SchemaRegistry.Encryption/LocalKmsClient.cs +++ b/src/Confluent.SchemaRegistry.Encryption/LocalKmsClient.cs @@ -1,4 +1,4 @@ -using System; +using System; using 
HkdfStandard; using System.Security.Cryptography; using System.Text; @@ -34,7 +34,7 @@ public bool DoesSupport(string uri) { return uri.StartsWith(LocalKmsDriver.Prefix); } - + public Task Encrypt(byte[] plaintext) { return Task.FromResult(cryptor.Encrypt(key, plaintext)); @@ -45,4 +45,4 @@ public Task Decrypt(byte[] ciphertext) return Task.FromResult(cryptor.Decrypt(key, ciphertext)); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/LocalKmsDriver.cs b/src/Confluent.SchemaRegistry.Encryption/LocalKmsDriver.cs index 87ad7bd91..5a2ffda06 100644 --- a/src/Confluent.SchemaRegistry.Encryption/LocalKmsDriver.cs +++ b/src/Confluent.SchemaRegistry.Encryption/LocalKmsDriver.cs @@ -1,19 +1,19 @@ -using System; +using System; using System.Collections.Generic; namespace Confluent.SchemaRegistry.Encryption { - + public class LocalKmsDriver : IKmsDriver { public static void Register() { KmsRegistry.RegisterKmsDriver(new LocalKmsDriver()); } - + public static readonly string Prefix = "local-kms://"; public static readonly string Secret = "secret"; - + public string GetKeyUrlPrefix() { return Prefix; @@ -25,4 +25,4 @@ public IKmsClient NewKmsClient(IDictionary config, string keyUrl return new LocalKmsClient(secret); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/Dek.cs b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/Dek.cs index e943de17d..1165a1a96 100644 --- a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/Dek.cs +++ b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/Dek.cs @@ -37,9 +37,9 @@ public class Dek : IEquatable /// /// The DEK algorithm. /// - [DataMember(Name = "algorithm")] + [DataMember(Name = "algorithm")] public DekFormat Algorithm { get; set; } - + /// /// The encrypted key material. /// @@ -56,7 +56,7 @@ public bool Equals(Dek other) { if (ReferenceEquals(null, other)) return false; if (ReferenceEquals(this, other)) return true; - return Subject == other.Subject && Version == other.Version && Algorithm == other.Algorithm && + return Subject == other.Subject && Version == other.Version && Algorithm == other.Algorithm && EncryptedKeyMaterial == other.EncryptedKeyMaterial && Deleted == other.Deleted; } @@ -81,4 +81,4 @@ public override int GetHashCode() } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/DekFormat.cs b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/DekFormat.cs index 451524e44..d9f684086 100644 --- a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/DekFormat.cs +++ b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/DekFormat.cs @@ -1,4 +1,4 @@ -using System.Runtime.Serialization; +using System.Runtime.Serialization; using Newtonsoft.Json; using Newtonsoft.Json.Converters; @@ -15,4 +15,4 @@ public enum DekFormat AES128_GCM, AES256_GCM } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/Kek.cs b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/Kek.cs index 85fd96d7d..5d71ba8bc 100644 --- a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/Kek.cs +++ b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/Kek.cs @@ -38,9 +38,9 @@ public class Kek : IEquatable /// /// The KMS key ID for the KEK /// - [DataMember(Name = "kmsKeyId")] + [DataMember(Name = "kmsKeyId")] public string KmsKeyId { get; set; } - + /// /// The KMS properties. 
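The KMS pieces above all follow one registration pattern: a driver's static Register() hands an instance to KmsRegistry.RegisterKmsDriver, GetKeyUrlPrefix names the key-URI scheme it owns, and NewKmsClient turns the serde config plus a key URL into an IKmsClient. A minimal sketch of a custom driver in that shape, assuming the generic parameters flattened out of this diff are IDictionary<string, string> and Task<byte[]> as in the bundled drivers; the "example-kms://" prefix and Example* names are hypothetical:

using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Confluent.SchemaRegistry.Encryption;

public class ExampleKmsDriver : IKmsDriver
{
    public static readonly string Prefix = "example-kms://";

    // Same hook the AWS/Azure/GCP/Vault/local drivers expose.
    public static void Register() => KmsRegistry.RegisterKmsDriver(new ExampleKmsDriver());

    public string GetKeyUrlPrefix() => Prefix;

    public IKmsClient NewKmsClient(IDictionary<string, string> config, string keyUrl)
        => new ExampleKmsClient(keyUrl);
}

public class ExampleKmsClient : IKmsClient
{
    private readonly string kekId;

    public ExampleKmsClient(string kekId)
    {
        if (!kekId.StartsWith(ExampleKmsDriver.Prefix))
        {
            throw new ArgumentException($"key URI must start with {ExampleKmsDriver.Prefix}");
        }
        this.kekId = kekId;
    }

    public bool DoesSupport(string uri) => uri.StartsWith(ExampleKmsDriver.Prefix);

    // Identity "encryption" for illustration only; a real client would call out to a KMS.
    public Task<byte[]> Encrypt(byte[] plaintext) => Task.FromResult(plaintext);

    public Task<byte[]> Decrypt(byte[] ciphertext) => Task.FromResult(ciphertext);
}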
/// @@ -50,31 +50,31 @@ public class Kek : IEquatable /// /// The doc for the KEK. /// - [DataMember(Name = "doc")] + [DataMember(Name = "doc")] public string Doc { get; set; } - + /// /// Whether the KEK is shared. /// [DataMember(Name = "shared")] public bool Shared { get; set; } - + /// /// Whether the KEK is deleted. /// [DataMember(Name = "deleted")] public bool Deleted { get; set; } - + public bool Equals(Kek other) { if (ReferenceEquals(null, other)) return false; if (ReferenceEquals(this, other)) return true; - return Name == other.Name && KmsType == other.KmsType && - KmsKeyId == other.KmsKeyId && - Utils.DictEquals(KmsProps, other.KmsProps) && + return Name == other.Name && KmsType == other.KmsType && + KmsKeyId == other.KmsKeyId && + Utils.DictEquals(KmsProps, other.KmsProps) && Doc == other.Doc && Shared == other.Shared && Deleted == other.Deleted; } - + public override bool Equals(object obj) { if (ReferenceEquals(null, obj)) return false; @@ -99,4 +99,4 @@ public override int GetHashCode() } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/RegisteredDek.cs b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/RegisteredDek.cs index 818e2af6d..82d8bcf79 100644 --- a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/RegisteredDek.cs +++ b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/RegisteredDek.cs @@ -25,7 +25,7 @@ public class RegisteredDek : Dek, IEquatable private string keyMaterial; private byte[] keyMaterialBytes; private byte[] encryptedKeyMaterialBytes; - + /// /// The KEK name for the DEK. /// @@ -110,4 +110,4 @@ public override int GetHashCode() } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/RegisteredKek.cs b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/RegisteredKek.cs index 84b12cca3..bb831ddb8 100644 --- a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/RegisteredKek.cs +++ b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/RegisteredKek.cs @@ -49,4 +49,4 @@ public override int GetHashCode() return base.GetHashCode(); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/UpdateKek.cs b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/UpdateKek.cs index f4ab08281..dae1f8b8d 100644 --- a/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/UpdateKek.cs +++ b/src/Confluent.SchemaRegistry.Encryption/Rest/DataContracts/UpdateKek.cs @@ -32,9 +32,9 @@ public class UpdateKek : IEquatable /// /// The doc for the KEK. /// - [DataMember(Name = "doc")] + [DataMember(Name = "doc")] public string Doc { get; set; } - + /// /// Whether the KEK is shared. /// @@ -67,4 +67,4 @@ public override int GetHashCode() } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Encryption/Rest/DekRestService.cs b/src/Confluent.SchemaRegistry.Encryption/Rest/DekRestService.cs index be7b2bc2c..f0e2e92e2 100644 --- a/src/Confluent.SchemaRegistry.Encryption/Rest/DekRestService.cs +++ b/src/Confluent.SchemaRegistry.Encryption/Rest/DekRestService.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2018 Confluent Inc. +// Copyright 2016-2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
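The Kek contract in this hunk implements value equality: Equals compares Name, KmsType, KmsKeyId, KmsProps, Doc, Shared and Deleted rather than object identity. A small illustration under that reading (the property values below are made up, and KmsProps is left null on both sides on the assumption the dictionary comparison treats two nulls as equal):

using System;
using Confluent.SchemaRegistry.Encryption;

static class KekEqualityExample
{
    static void Main()
    {
        var a = new Kek { Name = "orders-kek", KmsType = "local-kms", KmsKeyId = "mykey", Shared = true };
        var b = new Kek { Name = "orders-kek", KmsType = "local-kms", KmsKeyId = "mykey", Shared = true };

        Console.WriteLine(ReferenceEquals(a, b)); // False: two distinct instances
        Console.WriteLine(a.Equals(b));           // True: all compared members match
    }
}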
@@ -97,4 +97,4 @@ public async Task CreateDekAsync(string kekName, Dek dek) #endregion Deks } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Rules/BuiltinDeclarations.cs b/src/Confluent.SchemaRegistry.Rules/BuiltinDeclarations.cs index 222287164..2c0583ecb 100644 --- a/src/Confluent.SchemaRegistry.Rules/BuiltinDeclarations.cs +++ b/src/Confluent.SchemaRegistry.Rules/BuiltinDeclarations.cs @@ -55,4 +55,4 @@ public static IList Create() return decls; } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Rules/BuiltinLibrary.cs b/src/Confluent.SchemaRegistry.Rules/BuiltinLibrary.cs index c9b5db679..e7f5bd166 100644 --- a/src/Confluent.SchemaRegistry.Rules/BuiltinLibrary.cs +++ b/src/Confluent.SchemaRegistry.Rules/BuiltinLibrary.cs @@ -19,4 +19,4 @@ public virtual IList ProgramOptions }; } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Rules/BuiltinOverload.cs b/src/Confluent.SchemaRegistry.Rules/BuiltinOverload.cs index 25b6b9455..8c6151f57 100644 --- a/src/Confluent.SchemaRegistry.Rules/BuiltinOverload.cs +++ b/src/Confluent.SchemaRegistry.Rules/BuiltinOverload.cs @@ -202,4 +202,4 @@ public static bool ValidateUuid(string input) return Guid.TryParse(input, out _); } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Rules/CelExecutor.cs b/src/Confluent.SchemaRegistry.Rules/CelExecutor.cs index 891d38f59..346294639 100644 --- a/src/Confluent.SchemaRegistry.Rules/CelExecutor.cs +++ b/src/Confluent.SchemaRegistry.Rules/CelExecutor.cs @@ -1,4 +1,4 @@ -using System.Collections; +using System.Collections; using Avro; using Avro.Generic; using Avro.Specific; @@ -95,9 +95,9 @@ private async Task Execute(RuleContext ctx, string rule, object obj, IDi { type = ScriptType.Protobuf; } - else if (typeof(IList).IsAssignableFrom(msg.GetType()) - || (msg.GetType().IsGenericType - && (msg.GetType().GetGenericTypeDefinition() == typeof(List<>) + else if (typeof(IList).IsAssignableFrom(msg.GetType()) + || (msg.GetType().IsGenericType + && (msg.GetType().GetGenericTypeDefinition() == typeof(List<>) || msg.GetType().GetGenericTypeDefinition() == typeof(IList<>)))) { // list not supported @@ -141,12 +141,12 @@ private Script BuildScript(RuleWithArgs ruleWithArgs, object msg) if (msg is ISpecificRecord) { type = ((ISpecificRecord)msg).Schema; - + } else { type = ((GenericRecord)msg).Schema; - + } break; case ScriptType.Json: @@ -306,7 +306,7 @@ private static Google.Api.Expr.V1Alpha1.Type FindTypeForClass(System.Type type) var objType = FindTypeForClass(typeof(object)); return Decls.NewListType(objType); } - + return Decls.NewObjectType(type.FullName); } @@ -323,4 +323,4 @@ public enum ScriptType public record RuleWithArgs(string Rule, ScriptType ScriptType, IDictionary Decls, string Schema); } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Rules/CelFieldExecutor.cs b/src/Confluent.SchemaRegistry.Rules/CelFieldExecutor.cs index 0e16a2c67..eba39aefe 100644 --- a/src/Confluent.SchemaRegistry.Rules/CelFieldExecutor.cs +++ b/src/Confluent.SchemaRegistry.Rules/CelFieldExecutor.cs @@ -1,4 +1,4 @@ -using Google.Protobuf; +using Google.Protobuf; using Google.Protobuf.WellKnownTypes; namespace Confluent.SchemaRegistry.Rules @@ -25,7 +25,7 @@ public CelFieldExecutor() public override void Configure(IEnumerable> config) { } - + public override IFieldTransform NewTransform(RuleContext ctx) { CelFieldExecutorTransform transform = new CelFieldExecutorTransform(celExecutor); @@ -37,11 
+37,11 @@ public override void Dispose() { celExecutor.Dispose(); } - + public class CelFieldExecutorTransform : IFieldTransform { private CelExecutor celExecutor; - + public CelFieldExecutorTransform(CelExecutor celExecutor) { this.celExecutor = celExecutor; @@ -53,7 +53,7 @@ public void Init(RuleContext ctx) public async Task Transform(RuleContext ctx, RuleContext.FieldContext fieldCtx, object fieldValue) { - if (!fieldCtx.IsPrimitive()) + if (!fieldCtx.IsPrimitive()) { // CEL field transforms only apply to primitive types return fieldValue; @@ -75,10 +75,10 @@ public async Task Transform(RuleContext ctx, RuleContext.FieldContext fi } return result; } - + public void Dispose() { } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Rules/JsonataExecutor.cs b/src/Confluent.SchemaRegistry.Rules/JsonataExecutor.cs index 33dd64b5f..fe44dbcbb 100644 --- a/src/Confluent.SchemaRegistry.Rules/JsonataExecutor.cs +++ b/src/Confluent.SchemaRegistry.Rules/JsonataExecutor.cs @@ -1,4 +1,4 @@ -using Jsonata.Net.Native; +using Jsonata.Net.Native; using Jsonata.Net.Native.Json; using Jsonata.Net.Native.JsonNet; @@ -12,14 +12,14 @@ public static void Register() } public static readonly string RuleType = "JSONATA"; - + private readonly IDictionary cache = new Dictionary(); private readonly SemaphoreSlim cacheMutex = new SemaphoreSlim(1); - + public JsonataExecutor() { } - + public void Configure(IEnumerable> config) { } diff --git a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroDeserializer.cs b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroDeserializer.cs index 98b941488..095611e41 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroDeserializer.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroDeserializer.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2019 Confluent Inc. +// Copyright 2016-2019 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -59,7 +59,7 @@ public AvroDeserializer(ISchemaRegistryClient schemaRegistryClient) /// Deserializer configuration properties (refer to /// ). /// - public AvroDeserializer(ISchemaRegistryClient schemaRegistryClient, IEnumerable> config = null) + public AvroDeserializer(ISchemaRegistryClient schemaRegistryClient, IEnumerable> config = null) : this(schemaRegistryClient, config != null ? new AvroDeserializerConfig(config) : null) { } @@ -69,7 +69,7 @@ public AvroDeserializer(ISchemaRegistryClient schemaRegistryClient, AvroDeserial this.schemaRegistryClient = schemaRegistryClient; this.config = config; this.ruleExecutors = ruleExecutors ?? new List(); - + if (config == null) { return; } var nonAvroConfig = config diff --git a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroDeserializerConfig.cs b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroDeserializerConfig.cs index e122639af..ba93d1bab 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroDeserializerConfig.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroDeserializerConfig.cs @@ -68,7 +68,7 @@ public AvroDeserializerConfig() { } /// public AvroDeserializerConfig(IEnumerable> config) : base(config.ToDictionary(v => v.Key, v => v.Value)) { } - + /// /// Specifies whether or not the Avro deserializer should use the latest subject /// version for serialization. 
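That hunk closes out the AvroDeserializer and AvroDeserializerConfig part of the patch; the constructors shown take an ISchemaRegistryClient plus an optional config, and the serializer side mirrors them. A minimal construction sketch, assuming a Schema Registry reachable at the placeholder URL and using GenericRecord so the sample stays self-contained; AutoRegisterSchemas and UseLatestVersion are the config properties that appear elsewhere in this patch:

using Avro.Generic;
using Confluent.SchemaRegistry;
using Confluent.SchemaRegistry.Serdes;

static class AvroSerdeSetupExample
{
    static void Build()
    {
        // Placeholder URL; point this at a real Schema Registry instance.
        var schemaRegistry = new CachedSchemaRegistryClient(
            new SchemaRegistryConfig { Url = "http://localhost:8081" });

        var serializer = new AvroSerializer<GenericRecord>(
            schemaRegistry,
            new AvroSerializerConfig { AutoRegisterSchemas = true, UseLatestVersion = false });

        var deserializer = new AvroDeserializer<GenericRecord>(
            schemaRegistry,
            new AvroDeserializerConfig());
    }
}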
diff --git a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroSerializer.cs b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroSerializer.cs index 4c57e6b74..e8e0703c1 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroSerializer.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroSerializer.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2018 Confluent Inc. +// Copyright 2016-2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -55,7 +55,7 @@ public class AvroSerializer : IAsyncSerializer /// [Obsolete("Superseded by AvroSerializer(ISchemaRegistryClient, AvroSerializerConfig)")] public AvroSerializer(ISchemaRegistryClient schemaRegistryClient, IEnumerable> config) - {} + { } /// @@ -87,7 +87,7 @@ public AvroSerializer(ISchemaRegistryClient schemaRegistryClient, AvroSerializer this.schemaRegistryClient = schemaRegistryClient; this.config = config; this.ruleExecutors = ruleExecutors ?? new List(); - + if (config == null) { return; } var nonAvroConfig = config diff --git a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroSerializerConfig.cs b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroSerializerConfig.cs index 093f48613..3bcfd1e0f 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroSerializerConfig.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroSerializerConfig.cs @@ -125,8 +125,8 @@ public bool? AutoRegisterSchemas get { return GetBool(PropertyNames.AutoRegisterSchemas); } set { SetObject(PropertyNames.AutoRegisterSchemas, value); } } - - + + /// /// Specifies whether to normalize schemas, which will transform schemas /// to have a consistent format, including ordering properties and references. @@ -153,7 +153,7 @@ public bool? UseLatestVersion get { return GetBool(PropertyNames.UseLatestVersion); } set { SetObject(PropertyNames.UseLatestVersion, value); } } - + /// /// Specifies whether or not the Avro serializer should use the latest subject diff --git a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroUtils.cs b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroUtils.cs index a2d4ec72b..9f6c1b240 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Avro/AvroUtils.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Avro/AvroUtils.cs @@ -67,7 +67,7 @@ public static async Task Transform(RuleContext ctx, Avro.Schema schema, .ContinueWith(t => new KeyValuePair(it.Key, it.Value))) .ToList(); KeyValuePair[] entries = await Task.WhenAll(dictTasks).ConfigureAwait(false); - return entries.ToDictionary(it => it.Key, it => it.Value); + return entries.ToDictionary(it => it.Key, it => it.Value); case Avro.Schema.Type.Record: RecordSchema rs = (RecordSchema)schema; foreach (Field f in rs.Fields) @@ -86,7 +86,8 @@ public static async Task Transform(RuleContext ctx, Avro.Schema schema, { throw new RuleConditionException(ctx.Rule); } - } else + } + else { specificRecord.Put(f.Pos, newValue); } @@ -122,7 +123,7 @@ public static async Task Transform(RuleContext ctx, Avro.Schema schema, ISet ruleTags = ctx.Rule.Tags ?? 
new HashSet(); ISet intersect = new HashSet(fieldContext.Tags); intersect.IntersectWith(ruleTags); - + if (ruleTags.Count == 0 || intersect.Count != 0) { return await fieldTransform.Transform(ctx, fieldContext, message) @@ -202,7 +203,7 @@ private class AvroSpecificWriter : SpecificDefaultWriter, IUnionResolver public AvroSpecificWriter(Avro.Schema schema) : base(schema) { } - + public int Resolve(UnionSchema us, object obj) { for (int i = 0; i < us.Count; i++) @@ -212,13 +213,13 @@ public int Resolve(UnionSchema us, object obj) throw new AvroException("Cannot find a match for " + obj.GetType() + " in " + us); } } - + private class AvroGenericWriter : DefaultWriter, IUnionResolver { public AvroGenericWriter(Avro.Schema schema) : base(schema) { } - + public int Resolve(UnionSchema us, object obj) { for (int i = 0; i < us.Count; i++) @@ -229,4 +230,4 @@ public int Resolve(UnionSchema us, object obj) } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Serdes.Avro/GenericDeserializerImpl.cs b/src/Confluent.SchemaRegistry.Serdes.Avro/GenericDeserializerImpl.cs index b6e2f6bc8..8a37132f6 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Avro/GenericDeserializerImpl.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Avro/GenericDeserializerImpl.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,11 +35,11 @@ internal class GenericDeserializerImpl : AsyncDeserializer - private readonly Dictionary<(Avro.Schema, Avro.Schema), DatumReader> datumReaderBySchema + private readonly Dictionary<(Avro.Schema, Avro.Schema), DatumReader> datumReaderBySchema = new Dictionary<(Avro.Schema, Avro.Schema), DatumReader>(); - + public GenericDeserializerImpl( - ISchemaRegistryClient schemaRegistryClient, + ISchemaRegistryClient schemaRegistryClient, AvroDeserializerConfig config, IList ruleExecutors) : base(schemaRegistryClient, config, ruleExecutors) { @@ -58,7 +58,7 @@ public override async Task DeserializeAsync(ReadOnlyMemory : await Deserialize(context.Topic, context.Headers, data.ToArray(), context.Component == MessageComponentType.Key); } - + public async Task Deserialize(string topic, Headers headers, byte[] array, bool isKey) { try @@ -77,12 +77,12 @@ public async Task Deserialize(string topic, Headers headers, byte new SerializationContext(isKey ? MessageComponentType.Key : MessageComponentType.Value, topic), null) // else fall back to the deprecated config from (or default as currently supplied by) SchemaRegistry. - : schemaRegistryClient == null + : schemaRegistryClient == null ? null - : isKey + : isKey ? 
schemaRegistryClient.ConstructKeySubjectName(topic) : schemaRegistryClient.ConstructValueSubjectName(topic); - + Schema latestSchema = await GetReaderSchema(subject) .ConfigureAwait(continueOnCapturedContext: false); @@ -101,7 +101,7 @@ public async Task Deserialize(string topic, Headers headers, byte var writerId = IPAddress.NetworkToHostOrder(reader.ReadInt32()); (writerSchemaJson, writerSchema) = await GetSchema(subject, writerId); - + if (latestSchema != null) { migrations = await GetMigrations(subject, writerSchemaJson, latestSchema) @@ -126,14 +126,14 @@ public async Task Deserialize(string topic, Headers headers, byte jsonString = Encoding.UTF8.GetString(jsonStream.ToArray()); } - + JToken json = JToken.Parse(jsonString); json = await ExecuteMigrations(migrations, isKey, subject, topic, headers, json) .ContinueWith(t => (JToken)t.Result) .ConfigureAwait(continueOnCapturedContext: false); var latestSchemaAvro = await GetParsedSchema(latestSchema); Avro.IO.Decoder decoder = new JsonDecoder(latestSchemaAvro, json.ToString(Formatting.None)); - + datumReader = new GenericReader(latestSchemaAvro, latestSchemaAvro); data = datumReader.Read(default(GenericRecord), decoder); } @@ -143,8 +143,8 @@ public async Task Deserialize(string topic, Headers headers, byte data = datumReader.Read(default(GenericRecord), new BinaryDecoder(stream)); } } - - FieldTransformer fieldTransformer = async (ctx, transform, message) => + + FieldTransformer fieldTransformer = async (ctx, transform, message) => { return await AvroUtils.Transform(ctx, writerSchema, message, transform).ConfigureAwait(false); }; @@ -165,7 +165,7 @@ public async Task Deserialize(string topic, Headers headers, byte { return Task.FromResult(Avro.Schema.Parse(schema.SchemaString)); } - + private async Task> GetDatumReader(Avro.Schema writerSchema, Avro.Schema readerSchema) { DatumReader datumReader; diff --git a/src/Confluent.SchemaRegistry.Serdes.Avro/GenericSerializerImpl.cs b/src/Confluent.SchemaRegistry.Serdes.Avro/GenericSerializerImpl.cs index 0020f4a7d..9ef647a5a 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Avro/GenericSerializerImpl.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Avro/GenericSerializerImpl.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -125,7 +125,7 @@ public async Task Serialize(string topic, Headers headers, GenericRecord // Verify schema compatibility (& register as required) + get the // id corresponding to the schema. - + // TODO: Again, the hash functions in use below are potentially // slow since writerSchemaString is potentially long. 
It would be // better to use hash functions based on the writerSchemaString @@ -141,7 +141,7 @@ public async Task Serialize(string topic, Headers headers, GenericRecord latestSchema = await GetReaderSchema(subject) .ConfigureAwait(continueOnCapturedContext: false); - + var subjectSchemaPair = new KeyValuePair(subject, writerSchemaString); if (!registeredSchemas.Contains(subjectSchemaPair)) { @@ -189,7 +189,7 @@ public async Task Serialize(string topic, Headers headers, GenericRecord if (latestSchema != null) { var schema = await GetParsedSchema(latestSchema); - FieldTransformer fieldTransformer = async (ctx, transform, message) => + FieldTransformer fieldTransformer = async (ctx, transform, message) => { return await AvroUtils.Transform(ctx, schema, message, transform).ConfigureAwait(false); }; @@ -214,7 +214,7 @@ public async Task Serialize(string topic, Headers headers, GenericRecord throw e.InnerException; } } - + protected override Task ParseSchema(Schema schema) { return Task.FromResult(Avro.Schema.Parse(schema.SchemaString)); diff --git a/src/Confluent.SchemaRegistry.Serdes.Avro/SpecificDeserializerImpl.cs b/src/Confluent.SchemaRegistry.Serdes.Avro/SpecificDeserializerImpl.cs index a26009bd6..ce9aa85c4 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Avro/SpecificDeserializerImpl.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Avro/SpecificDeserializerImpl.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,7 +38,7 @@ internal class SpecificDeserializerImpl : AsyncDeserializer /// A datum reader cache (one corresponding to each write schema that's been seen) /// is maintained so that they only need to be constructed once. /// - private readonly Dictionary<(Avro.Schema, Avro.Schema), DatumReader> datumReaderBySchema + private readonly Dictionary<(Avro.Schema, Avro.Schema), DatumReader> datumReaderBySchema = new Dictionary<(Avro.Schema, Avro.Schema), DatumReader>(); /// @@ -95,7 +95,7 @@ public SpecificDeserializerImpl( "long, byte[], instances of ISpecificRecord and subclasses of SpecificFixed." ); } - + if (config == null) { return; } if (config.UseLatestVersion != null) { this.useLatestVersion = config.UseLatestVersion.Value; } @@ -111,7 +111,7 @@ public override async Task DeserializeAsync(ReadOnlyMemory data, bool i : await Deserialize(context.Topic, context.Headers, data.ToArray(), context.Component == MessageComponentType.Key); } - + public async Task Deserialize(string topic, Headers headers, byte[] array, bool isKey) { try @@ -123,16 +123,16 @@ public async Task Deserialize(string topic, Headers headers, byte[] array, bo { throw new InvalidDataException($"Expecting data framing of length 5 bytes or more but total data size is {array.Length} bytes"); } - + string subject = this.subjectNameStrategy != null // use the subject name strategy specified in the serializer config if available. ? this.subjectNameStrategy( new SerializationContext(isKey ? MessageComponentType.Key : MessageComponentType.Value, topic), null) // else fall back to the deprecated config from (or default as currently supplied by) SchemaRegistry. - : schemaRegistryClient == null + : schemaRegistryClient == null ? null - : isKey + : isKey ? 
schemaRegistryClient.ConstructKeySubjectName(topic) : schemaRegistryClient.ConstructValueSubjectName(topic); @@ -154,7 +154,7 @@ public async Task Deserialize(string topic, Headers headers, byte[] array, bo var writerId = IPAddress.NetworkToHostOrder(reader.ReadInt32()); (writerSchemaJson, writerSchema) = await GetSchema(subject, writerId); - + if (latestSchema != null) { migrations = await GetMigrations(subject, writerSchemaJson, latestSchema) @@ -166,7 +166,7 @@ public async Task Deserialize(string topic, Headers headers, byte[] array, bo { data = new GenericReader(writerSchema, writerSchema) .Read(default(GenericRecord), new BinaryDecoder(stream)); - + string jsonString = null; using (var jsonStream = new MemoryStream()) { @@ -179,13 +179,13 @@ public async Task Deserialize(string topic, Headers headers, byte[] array, bo jsonString = Encoding.UTF8.GetString(jsonStream.ToArray()); } - + JToken json = JToken.Parse(jsonString); json = await ExecuteMigrations(migrations, isKey, subject, topic, headers, json) .ContinueWith(t => (JToken)t.Result) .ConfigureAwait(continueOnCapturedContext: false); Avro.IO.Decoder decoder = new JsonDecoder(ReaderSchema, json.ToString(Formatting.None)); - + datumReader = new SpecificReader(ReaderSchema, ReaderSchema); data = Read(datumReader, decoder); } @@ -196,7 +196,7 @@ public async Task Deserialize(string topic, Headers headers, byte[] array, bo } } - FieldTransformer fieldTransformer = async (ctx, transform, message) => + FieldTransformer fieldTransformer = async (ctx, transform, message) => { return await AvroUtils.Transform(ctx, writerSchema, message, transform).ConfigureAwait(false); }; @@ -204,7 +204,7 @@ public async Task Deserialize(string topic, Headers headers, byte[] array, bo writerSchemaJson, data, fieldTransformer) .ConfigureAwait(continueOnCapturedContext: false); - return (T) data; + return (T)data; } catch (AggregateException e) { @@ -216,7 +216,7 @@ public async Task Deserialize(string topic, Headers headers, byte[] array, bo { return Task.FromResult(Avro.Schema.Parse(schema.SchemaString)); } - + private async Task> GetDatumReader(Avro.Schema writerSchema, Avro.Schema readerSchema) { DatumReader datumReader = null; diff --git a/src/Confluent.SchemaRegistry.Serdes.Avro/SpecificSerializerImpl.cs b/src/Confluent.SchemaRegistry.Serdes.Avro/SpecificSerializerImpl.cs index 5bd069d4b..db9e2378b 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Avro/SpecificSerializerImpl.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Avro/SpecificSerializerImpl.cs @@ -93,7 +93,7 @@ public SpecificSerializerImpl( { singleSchemaData = ExtractSchemaData(writerType); } - + if (config == null) { return; } if (config.BufferBytes != null) { this.initialBufferSize = config.BufferBytes.Value; } @@ -108,7 +108,7 @@ public SpecificSerializerImpl( throw new ArgumentException($"AvroSerializer: cannot enable both use.latest.version and auto.register.schemas"); } } - + private static SerializerSchemaData ExtractSchemaData(Type writerType) { SerializerSchemaData serializerSchemaData = new SerializerSchemaData(); @@ -211,10 +211,10 @@ public async Task Serialize(string topic, Headers headers, T data, bool : isKey ? 
schemaRegistryClient.ConstructKeySubjectName(topic, fullname) : schemaRegistryClient.ConstructValueSubjectName(topic, fullname); - + latestSchema = await GetReaderSchema(subject) .ConfigureAwait(continueOnCapturedContext: false); - + if (!currentSchemaData.SubjectsRegistered.Contains(subject)) { if (latestSchema != null) @@ -244,13 +244,13 @@ public async Task Serialize(string topic, Headers headers, T data, bool if (latestSchema != null) { var schema = await GetParsedSchema(latestSchema); - FieldTransformer fieldTransformer = async (ctx, transform, message) => + FieldTransformer fieldTransformer = async (ctx, transform, message) => { return await AvroUtils.Transform(ctx, schema, message, transform).ConfigureAwait(false); }; data = await ExecuteRules(isKey, subject, topic, headers, RuleMode.Write, null, latestSchema, data, fieldTransformer) - .ContinueWith(t => (T) t.Result) + .ContinueWith(t => (T)t.Result) .ConfigureAwait(continueOnCapturedContext: false); } @@ -271,7 +271,7 @@ public async Task Serialize(string topic, Headers headers, T data, bool throw e.InnerException; } } - + protected override Task ParseSchema(Schema schema) { return Task.FromResult(Avro.Schema.Parse(schema.SchemaString)); diff --git a/src/Confluent.SchemaRegistry.Serdes.Json/JsonDeserializer.cs b/src/Confluent.SchemaRegistry.Serdes.Json/JsonDeserializer.cs index 01a1c50d6..d51453da4 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Json/JsonDeserializer.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Json/JsonDeserializer.cs @@ -54,11 +54,11 @@ namespace Confluent.SchemaRegistry.Serdes public class JsonDeserializer : AsyncDeserializer where T : class { private readonly JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings; - + private JsonSchemaValidator validator = new JsonSchemaValidator(); private JsonSchema schema = null; - + /// /// Initialize a new JsonDeserializer instance. /// @@ -74,13 +74,13 @@ public JsonDeserializer(IEnumerable> config = null, { } - public JsonDeserializer(ISchemaRegistryClient schemaRegistryClient, IEnumerable> config = null, JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings = null) + public JsonDeserializer(ISchemaRegistryClient schemaRegistryClient, IEnumerable> config = null, JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings = null) : this(schemaRegistryClient, config != null ? new JsonDeserializerConfig(config) : null, jsonSchemaGeneratorSettings) { } - public JsonDeserializer(ISchemaRegistryClient schemaRegistryClient, JsonDeserializerConfig config, - JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings = null, IList ruleExecutors = null) + public JsonDeserializer(ISchemaRegistryClient schemaRegistryClient, JsonDeserializerConfig config, + JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings = null, IList ruleExecutors = null) : base(schemaRegistryClient, config, ruleExecutors) { this.jsonSchemaGeneratorSettings = jsonSchemaGeneratorSettings; @@ -155,7 +155,7 @@ public override async Task DeserializeAsync(ReadOnlyMemory data, bool i { throw new InvalidDataException($"Expecting data framing of length 6 bytes or more but total data size is {array.Length} bytes"); } - + bool isKey = context.Component == MessageComponentType.Key; string topic = context.Topic; string subject = this.subjectNameStrategy != null @@ -164,15 +164,15 @@ public override async Task DeserializeAsync(ReadOnlyMemory data, bool i new SerializationContext(isKey ? 
MessageComponentType.Key : MessageComponentType.Value, topic), null) // else fall back to the deprecated config from (or default as currently supplied by) SchemaRegistry. - : schemaRegistryClient == null + : schemaRegistryClient == null ? null - : isKey + : isKey ? schemaRegistryClient.ConstructKeySubjectName(topic) : schemaRegistryClient.ConstructValueSubjectName(topic); - + Schema latestSchema = await GetReaderSchema(subject) .ConfigureAwait(continueOnCapturedContext: false); - + try { Schema writerSchema = null; @@ -194,7 +194,7 @@ public override async Task DeserializeAsync(ReadOnlyMemory data, bool i { (writerSchema, writerSchemaJson) = await GetSchema(subject, writerId); } - + if (latestSchema != null) { migrations = await GetMigrations(subject, writerSchema, latestSchema) @@ -231,7 +231,7 @@ public override async Task DeserializeAsync(ReadOnlyMemory data, bool i using (var jsonReader = new StreamReader(jsonStream, Encoding.UTF8)) { string serializedString = jsonReader.ReadToEnd(); - + if (schema != null) { var validationResult = validator.Validate(serializedString, schema); @@ -261,8 +261,8 @@ public override async Task DeserializeAsync(ReadOnlyMemory data, bool i writerSchema, value, fieldTransformer) .ContinueWith(t => (T)t.Result) .ConfigureAwait(continueOnCapturedContext: false); - } - + } + return value; } catch (AggregateException e) @@ -275,7 +275,7 @@ protected override async Task ParseSchema(Schema schema) { JsonSchemaResolver utils = new JsonSchemaResolver( schemaRegistryClient, schema, jsonSchemaGeneratorSettings); - + return await utils.GetResolvedSchema(); } } diff --git a/src/Confluent.SchemaRegistry.Serdes.Json/JsonDeserializerConfig.cs b/src/Confluent.SchemaRegistry.Serdes.Json/JsonDeserializerConfig.cs index 26361dcef..9218841d8 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Json/JsonDeserializerConfig.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Json/JsonDeserializerConfig.cs @@ -54,8 +54,8 @@ public static class PropertyNames /// public const string SubjectNameStrategy = "json.deserializer.subject.name.strategy"; } - - + + /// /// Initialize a new . /// @@ -68,7 +68,7 @@ public JsonDeserializerConfig() { } /// public JsonDeserializerConfig(IEnumerable> config) : base(config.ToDictionary(v => v.Key, v => v.Value)) { } - + /// /// Specifies whether or not the JSON deserializer should use the latest subject /// version for serialization. diff --git a/src/Confluent.SchemaRegistry.Serdes.Json/JsonSchemaResolver.cs b/src/Confluent.SchemaRegistry.Serdes.Json/JsonSchemaResolver.cs index 05a500176..f8a999701 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Json/JsonSchemaResolver.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Json/JsonSchemaResolver.cs @@ -58,17 +58,19 @@ public class JsonSchemaResolver /// /// Schema generator setting to use. /// - public JsonSchemaResolver(ISchemaRegistryClient schemaRegistryClient, Schema schema, JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings = null){ + public JsonSchemaResolver(ISchemaRegistryClient schemaRegistryClient, Schema schema, JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings = null) + { this.schemaRegistryClient = schemaRegistryClient; this.root = schema; this.jsonSchemaGeneratorSettings = jsonSchemaGeneratorSettings; } - + /// /// Get the resolved JsonSchema instance for the Schema provided to /// the constructor. 
/// - public async Task GetResolvedSchema(){ + public async Task GetResolvedSchema() + { if (resolvedJsonSchema == null) { await CreateSchemaDictUtil(root); @@ -76,7 +78,7 @@ public async Task GetResolvedSchema(){ } return resolvedJsonSchema; } - + private async Task CreateSchemaDictUtil(Schema root) { string rootStr = root.SchemaString; diff --git a/src/Confluent.SchemaRegistry.Serdes.Json/JsonSerializer.cs b/src/Confluent.SchemaRegistry.Serdes.Json/JsonSerializer.cs index 009e195ea..96c03710e 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Json/JsonSerializer.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Json/JsonSerializer.cs @@ -56,7 +56,7 @@ public class JsonSerializer : AsyncSerializer where T : class { private readonly JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings; private readonly List ReferenceList = new List(); - + private JsonSchemaValidator validator = new JsonSchemaValidator(); /// @@ -81,7 +81,7 @@ public class JsonSerializer : AsyncSerializer where T : class /// /// JSON schema generator settings. /// - public JsonSerializer(ISchemaRegistryClient schemaRegistryClient, JsonSerializerConfig config = null, + public JsonSerializer(ISchemaRegistryClient schemaRegistryClient, JsonSerializerConfig config = null, JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings = null, IList ruleExecutors = null) : base(schemaRegistryClient, config, ruleExecutors) { @@ -92,7 +92,7 @@ public JsonSerializer(ISchemaRegistryClient schemaRegistryClient, JsonSerializer : JsonSchema.FromType(this.jsonSchemaGeneratorSettings); this.schemaText = schema.ToJson(); this.schemaFullname = schema.Title; - + if (config == null) { return; } var nonJsonConfig = config @@ -135,8 +135,8 @@ public JsonSerializer(ISchemaRegistryClient schemaRegistryClient, JsonSerializer /// /// JSON schema generator settings. 
/// - public JsonSerializer(ISchemaRegistryClient schemaRegistryClient, Schema schema, JsonSerializerConfig config = null, - JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings = null, IList ruleExecutors = null) + public JsonSerializer(ISchemaRegistryClient schemaRegistryClient, Schema schema, JsonSerializerConfig config = null, + JsonSchemaGeneratorSettings jsonSchemaGeneratorSettings = null, IList ruleExecutors = null) : this(schemaRegistryClient, config, jsonSchemaGeneratorSettings, ruleExecutors) { foreach (var reference in schema.References) @@ -193,7 +193,7 @@ public override async Task SerializeAsync(T value, SerializationContext latestSchema = await GetReaderSchema(subject, new Schema(schemaText, ReferenceList, SchemaType.Json)) .ConfigureAwait(continueOnCapturedContext: false); - + if (!subjectsRegistered.Contains(subject)) { if (latestSchema != null) @@ -218,7 +218,7 @@ public override async Task SerializeAsync(T value, SerializationContext { serdeMutex.Release(); } - + if (latestSchema != null) { var latestSchemaJson = await GetParsedSchema(latestSchema).ConfigureAwait(false); diff --git a/src/Confluent.SchemaRegistry.Serdes.Json/JsonSerializerConfig.cs b/src/Confluent.SchemaRegistry.Serdes.Json/JsonSerializerConfig.cs index 8886299fc..474c9f4f1 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Json/JsonSerializerConfig.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Json/JsonSerializerConfig.cs @@ -79,7 +79,7 @@ public static class PropertyNames /// default: false /// public const string LatestCompatibilityStrict = "json.serializer.latest.compatibility.strict"; - + /// Specifies whether or not the JSON serializer should use the latest subject /// version with the given metadata for serialization. /// WARNING: There is no check that the latest schema is backwards compatible @@ -135,7 +135,7 @@ public bool? AutoRegisterSchemas get { return GetBool(PropertyNames.AutoRegisterSchemas); } set { SetObject(PropertyNames.AutoRegisterSchemas, value); } } - + /// /// Specifies whether to normalize schemas, which will transform schemas @@ -177,7 +177,7 @@ public bool? LatestCompatibilityStrict get { return GetBool(PropertyNames.LatestCompatibilityStrict); } set { SetObject(PropertyNames.LatestCompatibilityStrict, value); } } - + /// /// Specifies whether or not the JSON serializer should use the latest schema @@ -190,7 +190,7 @@ public IDictionary UseLatestWithMetadata get { return GetDictionaryProperty(PropertyNames.UseLatestWithMetadata); } set { SetDictionaryProperty(PropertyNames.UseLatestWithMetadata, value); } } - + /// /// Subject name strategy. 
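Editor's note: the JsonSerializerConfig options being reformatted above are the ones a caller would normally set when wiring the serializer up. A hedged usage sketch, assuming the config properties shown in this patch (the record type and registry URL are placeholders):

using Confluent.SchemaRegistry;
using Confluent.SchemaRegistry.Serdes;

public class MyRecord { public string Name { get; set; } }   // placeholder POCO

public static class JsonSerdeSetup
{
    public static JsonSerializer<MyRecord> Build()
    {
        var schemaRegistry = new CachedSchemaRegistryClient(
            new SchemaRegistryConfig { Url = "http://localhost:8081" });

        var jsonConfig = new JsonSerializerConfig
        {
            AutoRegisterSchemas = false,        // rely on schemas already in the registry
            UseLatestVersion = true,            // serialize against the latest subject version
            LatestCompatibilityStrict = true    // and verify the payload is compatible with it
        };

        return new JsonSerializer<MyRecord>(schemaRegistry, jsonConfig);
    }
}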
diff --git a/src/Confluent.SchemaRegistry.Serdes.Json/JsonUtils.cs b/src/Confluent.SchemaRegistry.Serdes.Json/JsonUtils.cs index 243e598df..b091acf49 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Json/JsonUtils.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Json/JsonUtils.cs @@ -41,13 +41,13 @@ public static async Task Transform(RuleContext ctx, JsonSchema schema, s { return message; } - + RuleContext.FieldContext fieldContext = ctx.CurrentField(); if (fieldContext != null) { fieldContext.Type = GetType(schema); } - + if (schema.AllOf.Count > 0 || schema.AnyOf.Count > 0 || schema.OneOf.Count > 0) { JToken jsonObject = JToken.FromObject(message); @@ -65,9 +65,9 @@ public static async Task Transform(RuleContext ctx, JsonSchema schema, s } else if (schema.IsArray) { - bool isList = typeof(IList).IsAssignableFrom(message.GetType()) - || (message.GetType().IsGenericType - && (message.GetType().GetGenericTypeDefinition() == typeof(List<>) + bool isList = typeof(IList).IsAssignableFrom(message.GetType()) + || (message.GetType().IsGenericType + && (message.GetType().GetGenericTypeDefinition() == typeof(List<>) || message.GetType().GetGenericTypeDefinition() == typeof(IList<>))); if (!isList) { @@ -125,7 +125,7 @@ public static async Task Transform(RuleContext ctx, JsonSchema schema, s ISet ruleTags = ctx.Rule.Tags ?? new HashSet(); ISet intersect = new HashSet(fieldContext.Tags); intersect.IntersectWith(ruleTags); - + if (ruleTags.Count == 0 || intersect.Count != 0) { return await fieldTransform.Transform(ctx, fieldContext, message) @@ -191,7 +191,7 @@ public FieldAccessor(Type type, string fieldName) SetValue = (instance, value) => propertyInfo.SetValue(instance, value); return; } - + var fieldInfo = type.GetField(fieldName, BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Instance); if (fieldInfo != null) @@ -200,7 +200,7 @@ public FieldAccessor(Type type, string fieldName) SetValue = (instance, value) => fieldInfo.SetValue(instance, value); return; } - + foreach (PropertyInfo prop in type.GetProperties()) { if (prop.IsDefined(typeof(JsonPropertyAttribute))) @@ -217,7 +217,7 @@ public FieldAccessor(Type type, string fieldName) } } } - + foreach (FieldInfo field in type.GetFields()) { if (field.IsDefined(typeof(JsonPropertyAttribute))) @@ -234,7 +234,7 @@ public FieldAccessor(Type type, string fieldName) } } } - + throw new ArgumentException("Could not find field " + fieldName); } @@ -249,4 +249,4 @@ public void SetFieldValue(object message, object value) } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufDeserializer.cs b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufDeserializer.cs index 5a7c78383..db888eef1 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufDeserializer.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufDeserializer.cs @@ -50,7 +50,7 @@ namespace Confluent.SchemaRegistry.Serdes public class ProtobufDeserializer : AsyncDeserializer where T : class, IMessage, new() { private bool useDeprecatedFormat; - + private MessageParser parser; /// @@ -64,12 +64,12 @@ public ProtobufDeserializer(IEnumerable> config = n { } - public ProtobufDeserializer(ISchemaRegistryClient schemaRegistryClient, IEnumerable> config = null) + public ProtobufDeserializer(ISchemaRegistryClient schemaRegistryClient, IEnumerable> config = null) : this(schemaRegistryClient, config != null ? 
new ProtobufDeserializerConfig(config) : null) { } - public ProtobufDeserializer(ISchemaRegistryClient schemaRegistryClient, ProtobufDeserializerConfig config, + public ProtobufDeserializer(ISchemaRegistryClient schemaRegistryClient, ProtobufDeserializerConfig config, IList ruleExecutors = null) : base(schemaRegistryClient, config, ruleExecutors) { this.parser = new MessageParser(() => new T()); @@ -129,12 +129,12 @@ public override async Task DeserializeAsync(ReadOnlyMemory data, bool i new SerializationContext(isKey ? MessageComponentType.Key : MessageComponentType.Value, topic), null) // else fall back to the deprecated config from (or default as currently supplied by) SchemaRegistry. - : schemaRegistryClient == null + : schemaRegistryClient == null ? null - : isKey + : isKey ? schemaRegistryClient.ConstructKeySubjectName(topic) : schemaRegistryClient.ConstructValueSubjectName(topic); - + // Currently Protobuf does not support migration rules because of lack of support for DynamicMessage // See https://github.com/protocolbuffers/protobuf/issues/658 /* @@ -166,7 +166,7 @@ public override async Task DeserializeAsync(ReadOnlyMemory data, bool i // needed, but parsing them is the easiest way to seek to the start of // the serialized data because they are varints. var indicesLength = useDeprecatedFormat ? (int)stream.ReadUnsignedVarint() : stream.ReadVarint(); - for (int i=0; i DeserializeAsync(ReadOnlyMemory data, bool i .ContinueWith(t => (T)t.Result) .ConfigureAwait(continueOnCapturedContext: false); } - + return message; } catch (AggregateException e) diff --git a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufDeserializerConfig.cs b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufDeserializerConfig.cs index c7a2026a1..fc615798b 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufDeserializerConfig.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufDeserializerConfig.cs @@ -53,7 +53,7 @@ public static class PropertyNames /// Possible values: /// public const string SubjectNameStrategy = "protobuf.deserializer.subject.name.strategy"; - + /// /// Specifies whether or not the Protobuf deserializer should deserialize message indexes /// without zig-zag encoding. @@ -62,8 +62,8 @@ public static class PropertyNames /// public const string UseDeprecatedFormat = "protobuf.deserializer.use.deprecated.format"; } - - + + /// /// Initialize a new . /// @@ -77,7 +77,7 @@ public ProtobufDeserializerConfig() { } public ProtobufDeserializerConfig(IEnumerable> config) : base(config.ToDictionary(v => v.Key, v => v.Value)) { } - + /// /// Specifies whether or not the Protobuf deserializer should use the latest subject /// version for serialization. @@ -133,8 +133,8 @@ public SubjectNameStrategy? SubjectNameStrategy else { this.properties[PropertyNames.SubjectNameStrategy] = value.ToString(); } } } - - + + /// /// Specifies whether the Protobuf deserializer should deserialize message indexes /// without zig-zag encoding. diff --git a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufSerializer.cs b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufSerializer.cs index 99d1af2ae..9efef88e0 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufSerializer.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufSerializer.cs @@ -52,7 +52,7 @@ namespace Confluent.SchemaRegistry.Serdes /// a single 0 byte as an optimization. /// 2. The protobuf serialized data. 
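Editor's note: the doc comment above (and the index-reading loop in ProtobufDeserializer) describe the Protobuf-specific part of the framing: after the schema id comes a varint-encoded list of message indices, collapsed to a single 0 byte when the message is the first type in its .proto file; the patch selects zig-zag or plain unsigned varints based on use.deprecated.format. A sketch of writing that index list, with the varint encoder passed in so the encoding choice stays out of the example:

using System;
using System.IO;

static class MessageIndexSketch
{
    public static void Write(Stream stream, int[] indexes, Action<Stream, uint> writeVarint)
    {
        if (indexes.Length == 1 && indexes[0] == 0)
        {
            stream.WriteByte(0);                     // common case: first message in the file
            return;
        }

        writeVarint(stream, (uint)indexes.Length);   // how many indices follow
        foreach (var index in indexes)
        {
            writeVarint(stream, (uint)index);        // each index as a varint
        }
    }
}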
/// - public class ProtobufSerializer : AsyncSerializer where T : IMessage, new() + public class ProtobufSerializer : AsyncSerializer where T : IMessage, new() { private bool skipKnownTypes; private bool useDeprecatedFormat; @@ -70,11 +70,11 @@ namespace Confluent.SchemaRegistry.Serdes /// /// Initialize a new instance of the ProtobufSerializer class. /// - public ProtobufSerializer(ISchemaRegistryClient schemaRegistryClient, ProtobufSerializerConfig config = null, + public ProtobufSerializer(ISchemaRegistryClient schemaRegistryClient, ProtobufSerializerConfig config = null, IList ruleExecutors = null) : base(schemaRegistryClient, config, ruleExecutors) { if (config == null) - { + { this.referenceSubjectNameStrategy = ReferenceSubjectNameStrategy.ReferenceName.ToDelegate(); return; } @@ -115,7 +115,7 @@ private static byte[] CreateIndexArray(MessageDescriptor md, bool useDeprecatedF var prevMd = currentMd; currentMd = currentMd.ContainingType; bool foundNested = false; - for (int i=0; i> RegisterOrGetReferences(FileDescriptor fd, SerializationContext context, bool autoRegisterSchema, bool skipKnownTypes) { var tasks = new List>(); - for (int i=0; i> t = async (dependency) => { + + Func> t = async (dependency) => + { var dependencyReferences = await RegisterOrGetReferences(dependency, context, autoRegisterSchema, skipKnownTypes).ConfigureAwait(continueOnCapturedContext: false); var subject = referenceSubjectNameStrategy(context, dependency.Name); var schema = new Schema(dependency.SerializedData.ToBase64(), dependencyReferences, SchemaType.Protobuf); @@ -267,7 +268,7 @@ public override async Task SerializeAsync(T value, SerializationContext latestSchema = await GetReaderSchema(subject) .ConfigureAwait(continueOnCapturedContext: false); - + if (!subjectsRegistered.Contains(subject)) { if (latestSchema != null) diff --git a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufSerializerConfig.cs b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufSerializerConfig.cs index 97770451e..2c974059d 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufSerializerConfig.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufSerializerConfig.cs @@ -148,8 +148,8 @@ public bool? AutoRegisterSchemas get { return GetBool(PropertyNames.AutoRegisterSchemas); } set { SetObject(PropertyNames.AutoRegisterSchemas, value); } } - - + + /// /// Specifies whether to normalize schemas, which will transform schemas /// to have a consistent format, including ordering properties and references. @@ -176,7 +176,7 @@ public bool? UseLatestVersion get { return GetBool(PropertyNames.UseLatestVersion); } set { SetObject(PropertyNames.UseLatestVersion, value); } } - + /// /// Specifies whether or not the Protobuf serializer should use the latest subject @@ -189,7 +189,7 @@ public IDictionary UseLatestWithMetadata get { return GetDictionaryProperty(PropertyNames.UseLatestWithMetadata); } set { SetDictionaryProperty(PropertyNames.UseLatestWithMetadata, value); } } - + /// /// Specifies whether or not the Protobuf serializer should skip known types @@ -202,7 +202,7 @@ public bool? SkipKnownTypes get { return GetBool(PropertyNames.SkipKnownTypes); } set { SetObject(PropertyNames.SkipKnownTypes, value); } } - + /// /// Specifies whether the Protobuf serializer should serialize message indexes @@ -215,7 +215,7 @@ public bool? UseDeprecatedFormat get { return GetBool(PropertyNames.UseDeprecatedFormat); } set { SetObject(PropertyNames.UseDeprecatedFormat, value); } } - + /// /// Subject name strategy. 
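Editor's note: several hunks in this patch fall back to schemaRegistryClient.ConstructKeySubjectName / ConstructValueSubjectName when no subject name strategy is configured. With the default Topic strategy that amounts to the mapping sketched below (illustrative only; the real logic lives in the client):

static class SubjectNames
{
    // e.g. SubjectNames.Topic("orders", isKey: false) -> "orders-value"
    public static string Topic(string topic, bool isKey)
        => topic + (isKey ? "-key" : "-value");
}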
diff --git a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufUtils.cs b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufUtils.cs index 887dd87cb..50407383c 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufUtils.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Protobuf/ProtobufUtils.cs @@ -69,7 +69,7 @@ public static class ProtobufUtils { "google/protobuf/type.proto", GetResource("google.protobuf.type.proto") }, { "google/protobuf/wrappers.proto", GetResource("google.protobuf.wrappers.proto") } }; - + private static string GetResource(string resourceName) { var info = Assembly.GetExecutingAssembly().GetName(); @@ -90,10 +90,10 @@ public static async Task Transform(RuleContext ctx, object desc, object } RuleContext.FieldContext fieldContext = ctx.CurrentField(); - - if (typeof(IList).IsAssignableFrom(message.GetType()) - || (message.GetType().IsGenericType - && (message.GetType().GetGenericTypeDefinition() == typeof(List<>) + + if (typeof(IList).IsAssignableFrom(message.GetType()) + || (message.GetType().IsGenericType + && (message.GetType().GetGenericTypeDefinition() == typeof(List<>) || message.GetType().GetGenericTypeDefinition() == typeof(IList<>)))) { var tasks = ((IList)message) @@ -102,9 +102,9 @@ public static async Task Transform(RuleContext ctx, object desc, object object[] items = await Task.WhenAll(tasks).ConfigureAwait(false); return items.ToList(); } - else if (typeof(IDictionary).IsAssignableFrom(message.GetType()) - || (message.GetType().IsGenericType - && (message.GetType().GetGenericTypeDefinition() == typeof(Dictionary<,>) + else if (typeof(IDictionary).IsAssignableFrom(message.GetType()) + || (message.GetType().IsGenericType + && (message.GetType().GetGenericTypeDefinition() == typeof(Dictionary<,>) || message.GetType().GetGenericTypeDefinition() == typeof(IDictionary<,>)))) { return message; @@ -156,7 +156,7 @@ public static async Task Transform(RuleContext ctx, object desc, object ISet ruleTags = ctx.Rule.Tags ?? 
new HashSet(); ISet intersect = new HashSet(fieldContext.Tags); intersect.IntersectWith(ruleTags); - + if (ruleTags.Count == 0 || intersect.Count != 0) { if (message is ByteString) @@ -300,20 +300,20 @@ public static FileDescriptorSet Parse(string schema, IDictionary { IDictionary allImports = new Dictionary(BuiltIns); imports?.ToList().ForEach(x => allImports.Add(x.Key, x.Value)); - + var fds = new FileDescriptorSet(); fds.FileSystem = new ProtobufImports(allImports); - + fds.Add("__root.proto", true, new StringReader(schema)); foreach (KeyValuePair import in allImports) { fds.AddImportPath(import.Key); - + } fds.Process(); return fds; - } - + } + class ProtobufImports : IFileSystem { protected IDictionary Imports { get; set; } @@ -334,4 +334,4 @@ public TextReader OpenText(string path) } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry.Serdes.Protobuf/Utils.cs b/src/Confluent.SchemaRegistry.Serdes.Protobuf/Utils.cs index 843e7f105..bb16eb599 100644 --- a/src/Confluent.SchemaRegistry.Serdes.Protobuf/Utils.cs +++ b/src/Confluent.SchemaRegistry.Serdes.Protobuf/Utils.cs @@ -22,27 +22,30 @@ namespace Confluent.SchemaRegistry.Serdes { internal static class Utils { - public static void WriteVarint(this Stream stream, uint value) { + public static void WriteVarint(this Stream stream, uint value) + { WriteUnsignedVarint(stream, (value << 1) ^ (value >> 31)); } - + /// /// Inspired by: https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/utils/ByteUtils.java#L284 /// public static void WriteUnsignedVarint(this Stream stream, uint value) { - while ((value & 0xffffff80) != 0L) { - byte b = (byte) ((value & 0x7f) | 0x80); + while ((value & 0xffffff80) != 0L) + { + byte b = (byte)((value & 0x7f) | 0x80); stream.WriteByte(b); value >>= 7; } - stream.WriteByte((byte) value); + stream.WriteByte((byte)value); } - public static int ReadVarint(this Stream stream) { + public static int ReadVarint(this Stream stream) + { var value = ReadUnsignedVarint(stream); return (int)((value >> 1) ^ -(value & 1)); } - + /// /// Inspired by: https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/utils/ByteUtils.java#L142 /// @@ -51,7 +54,8 @@ public static uint ReadUnsignedVarint(this Stream stream) int value = 0; int i = 0; int b; - while (true) { + while (true) + { b = stream.ReadByte(); if (b == -1) throw new InvalidOperationException("Unexpected end of stream reading varint."); if ((b & 0x80) == 0) { break; } diff --git a/src/Confluent.SchemaRegistry/AsyncDeserializer.cs b/src/Confluent.SchemaRegistry/AsyncDeserializer.cs index 7ae02f385..75712c1fb 100644 --- a/src/Confluent.SchemaRegistry/AsyncDeserializer.cs +++ b/src/Confluent.SchemaRegistry/AsyncDeserializer.cs @@ -24,9 +24,9 @@ namespace Confluent.SchemaRegistry { public abstract class AsyncDeserializer : AsyncSerde, IAsyncDeserializer { - protected readonly int headerSize = sizeof(int) + sizeof(byte); - - protected AsyncDeserializer(ISchemaRegistryClient schemaRegistryClient, SerdeConfig config, + protected readonly int headerSize = sizeof(int) + sizeof(byte); + + protected AsyncDeserializer(ISchemaRegistryClient schemaRegistryClient, SerdeConfig config, IList ruleExecutors = null) : base(schemaRegistryClient, config, ruleExecutors) { } diff --git a/src/Confluent.SchemaRegistry/AsyncSerde.cs b/src/Confluent.SchemaRegistry/AsyncSerde.cs index 9b17649a7..59299e07a 100644 --- a/src/Confluent.SchemaRegistry/AsyncSerde.cs +++ b/src/Confluent.SchemaRegistry/AsyncSerde.cs 
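Editor's note: the Utils.cs helpers reformatted above combine an unsigned varint byte encoding with a zig-zag step so small negative values stay small on the wire. The standard zig-zag mapping, shown on plain ints so the round trip is easy to check in isolation (a sketch, not the library code):

static class ZigZagSketch
{
    // 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
    public static uint Encode(int value) => (uint)((value << 1) ^ (value >> 31));

    public static int Decode(uint encoded) => (int)((encoded >> 1) ^ (uint)(-(int)(encoded & 1)));
}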
@@ -33,17 +33,17 @@ public abstract class AsyncSerde { protected ISchemaRegistryClient schemaRegistryClient; protected IList ruleExecutors; - + protected bool useLatestVersion = false; protected bool latestCompatibilityStrict = false; protected IDictionary useLatestWithMetadata = null; protected SubjectNameStrategyDelegate subjectNameStrategy = null; - + protected SemaphoreSlim serdeMutex = new SemaphoreSlim(1); - + private readonly IDictionary parsedSchemaCache = new Dictionary(); private SemaphoreSlim parsedSchemaMutex = new SemaphoreSlim(1); - + protected AsyncSerde(ISchemaRegistryClient schemaRegistryClient, SerdeConfig config, IList ruleExecutors = null) { this.schemaRegistryClient = schemaRegistryClient; @@ -54,10 +54,10 @@ protected AsyncSerde(ISchemaRegistryClient schemaRegistryClient, SerdeConfig con IEnumerable> ruleConfigs = schemaRegistryClient.Config.Concat(config .Select(kv => new KeyValuePair( kv.Key.StartsWith("rules.") ? kv.Key.Substring("rules.".Length) : kv.Key, kv.Value))); - + foreach (IRuleExecutor executor in this.ruleExecutors.Concat(RuleRegistry.GetRuleExecutors())) { - executor.Configure(ruleConfigs); + executor.Configure(ruleConfigs); } } @@ -92,9 +92,9 @@ protected async Task GetParsedSchema(Schema schema) parsedSchemaMutex.Release(); } } - + protected abstract Task ParseSchema(Schema schema); - + protected async Task> ResolveReferences(Schema schema) { IList references = schema.References; @@ -109,7 +109,7 @@ protected async Task> ResolveReferences(Schema schem .ConfigureAwait(continueOnCapturedContext: false); return result; } - + private async Task> ResolveReferences( Schema schema, IDictionary schemas, ISet visited) { @@ -165,23 +165,29 @@ protected async Task> GetMigrations(string subject, Schema writ IList versions = await GetSchemasBetween(subject, first, last) .ConfigureAwait(continueOnCapturedContext: false); Schema previous = null; - for (int i = 0; i < versions.Count; i++) { - Schema current = versions[i]; - if (i == 0) { - // skip the first version - previous = current; - continue; - } - if (current.RuleSet != null && current.RuleSet.HasRules(migrationMode)) { - Migration m; - if (migrationMode == RuleMode.Upgrade) { - m = new Migration(migrationMode, previous, current); - } else { - m = new Migration(migrationMode, current, previous); + for (int i = 0; i < versions.Count; i++) + { + Schema current = versions[i]; + if (i == 0) + { + // skip the first version + previous = current; + continue; } - migrations.Add(m); - } - previous = current; + if (current.RuleSet != null && current.RuleSet.HasRules(migrationMode)) + { + Migration m; + if (migrationMode == RuleMode.Upgrade) + { + m = new Migration(migrationMode, previous, current); + } + else + { + m = new Migration(migrationMode, current, previous); + } + migrations.Add(m); + } + previous = current; } if (migrationMode == RuleMode.Downgrade) { @@ -200,7 +206,8 @@ private async Task> GetSchemasBetween(string subject, Schema first var tasks = new List>(); int version1 = first.Version; int version2 = last.Version; - for (int i = version1 + 1; i < version2; i++) { + for (int i = version1 + 1; i < version2; i++) + { tasks.Add(schemaRegistryClient.GetRegisteredSchemaAsync(subject, i)); } RegisteredSchema[] schemas = await Task.WhenAll(tasks).ConfigureAwait(continueOnCapturedContext: false); @@ -211,7 +218,7 @@ private async Task> GetSchemasBetween(string subject, Schema first result.Add(last); return result; } - + protected async Task GetReaderSchema(string subject, Schema schema = null) { if 
(schemaRegistryClient == null) @@ -242,14 +249,14 @@ protected async Task GetReaderSchema(string subject, Schema sc return null; } - + protected async Task ExecuteMigrations( - IList migrations, + IList migrations, bool isKey, - String subject, + String subject, String topic, - Headers headers, - object message) + Headers headers, + object message) { foreach (Migration m in migrations) { @@ -274,13 +281,13 @@ protected async Task ExecuteMigrations( /// /// protected async Task ExecuteRules( - bool isKey, - string subject, - string topic, + bool isKey, + string subject, + string topic, Headers headers, - RuleMode ruleMode, - Schema source, - Schema target, + RuleMode ruleMode, + Schema source, + Schema target, object message, FieldTransformer fieldTransformer) { @@ -370,14 +377,14 @@ await RunAction(ctx, ruleMode, rule, message != null ? rule.OnSuccess : rule.OnF } catch (RuleException ex) { - await RunAction(ctx, ruleMode, rule, rule.OnFailure, message, + await RunAction(ctx, ruleMode, rule, rule.OnFailure, message, ex, ErrorAction.ActionType) .ConfigureAwait(continueOnCapturedContext: false); } } else { - await RunAction(ctx, ruleMode, rule, rule.OnFailure, message, + await RunAction(ctx, ruleMode, rule, rule.OnFailure, message, new RuleException("Could not find rule executor of type " + rule.Type), ErrorAction.ActionType) .ConfigureAwait(continueOnCapturedContext: false); } @@ -406,7 +413,7 @@ private static IRuleExecutor GetRuleExecutor(IList ruleExecutors, return null; } - private static async Task RunAction(RuleContext ctx, RuleMode ruleMode, + private static async Task RunAction(RuleContext ctx, RuleMode ruleMode, Rule rule, string action, object message, RuleException ex, string defaultAction) { string actionName = GetRuleActionName(rule, ruleMode, action); @@ -425,7 +432,8 @@ private static async Task RunAction(RuleContext ctx, RuleMode ruleMode, try { await ruleAction.Run(ctx, message, ex).ConfigureAwait(continueOnCapturedContext: false); - } catch (RuleException e) + } + catch (RuleException e) { throw new SerializationException("Failed to run rule action " + actionName, e); } @@ -468,7 +476,7 @@ private static IRuleAction GetRuleAction(string actionName) return action; } } - + public class Migration : IEquatable { public Migration(RuleMode ruleMode, Schema source, Schema target) @@ -477,11 +485,11 @@ public Migration(RuleMode ruleMode, Schema source, Schema target) Source = source; Target = target; } - + public RuleMode RuleMode { get; set; } - + public Schema Source { get; set; } - + public Schema Target { get; set; } public bool Equals(Migration other) diff --git a/src/Confluent.SchemaRegistry/AsyncSerializer.cs b/src/Confluent.SchemaRegistry/AsyncSerializer.cs index 6e6d7ed63..bd51d0618 100644 --- a/src/Confluent.SchemaRegistry/AsyncSerializer.cs +++ b/src/Confluent.SchemaRegistry/AsyncSerializer.cs @@ -27,15 +27,15 @@ namespace Confluent.SchemaRegistry public abstract class AsyncSerializer : AsyncSerde, IAsyncSerializer { private const int DefaultInitialBufferSize = 1024; - + protected bool autoRegisterSchema = true; protected bool normalizeSchemas = false; - + protected int initialBufferSize = DefaultInitialBufferSize; - + protected HashSet subjectsRegistered = new HashSet(); - - protected AsyncSerializer(ISchemaRegistryClient schemaRegistryClient, SerdeConfig config, + + protected AsyncSerializer(ISchemaRegistryClient schemaRegistryClient, SerdeConfig config, IList ruleExecutors = null) : base(schemaRegistryClient, config, ruleExecutors) { } diff --git 
a/src/Confluent.SchemaRegistry/AuthCredentialsSource.cs b/src/Confluent.SchemaRegistry/AuthCredentialsSource.cs index def76d6e3..b879d21ea 100644 --- a/src/Confluent.SchemaRegistry/AuthCredentialsSource.cs +++ b/src/Confluent.SchemaRegistry/AuthCredentialsSource.cs @@ -33,4 +33,4 @@ public enum AuthCredentialsSource /// SaslInherit } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry/CachedSchemaRegistryClient.cs b/src/Confluent.SchemaRegistry/CachedSchemaRegistryClient.cs index 0d7cf4a1e..8d7a31d86 100644 --- a/src/Confluent.SchemaRegistry/CachedSchemaRegistryClient.cs +++ b/src/Confluent.SchemaRegistry/CachedSchemaRegistryClient.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2020 Confluent Inc. +// Copyright 2016-2020 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -71,7 +71,7 @@ public class CachedSchemaRegistryClient : ISchemaRegistryClient, IDisposable new Dictionary>(); private readonly MemoryCache latestVersionBySubject = new MemoryCache(new MemoryCacheOptions()); - + private readonly MemoryCache latestWithMetadataBySubject = new MemoryCache(new MemoryCacheOptions()); private readonly SemaphoreSlim cacheMutex = new SemaphoreSlim(1); @@ -110,7 +110,7 @@ public class CachedSchemaRegistryClient : ISchemaRegistryClient, IDisposable /// public const SubjectNameStrategy DefaultValueSubjectNameStrategy = SubjectNameStrategy.Topic; - + /// public IEnumerable> Config => config; @@ -173,7 +173,7 @@ public CachedSchemaRegistryClient(IEnumerable> conf { throw new ArgumentNullException("config properties must be specified."); } - + this.config = config; keySubjectNameStrategy = GetKeySubjectNameStrategy(config); @@ -229,7 +229,7 @@ public CachedSchemaRegistryClient(IEnumerable> conf throw new ArgumentException( $"Configured value for {SchemaRegistryConfig.PropertyNames.SchemaRegistryLatestCacheTtlSecs} must be an integer."); } - + var basicAuthSource = config.FirstOrDefault(prop => prop.Key.ToLower() == SchemaRegistryConfig.PropertyNames.SchemaRegistryBasicAuthCredentialsSource) .Value ?? 
""; @@ -658,7 +658,7 @@ public async Task GetLatestWithMetadataAsync(string subject, RegisteredSchema schema; if (!latestWithMetadataBySubject.TryGetValue(key, out schema)) { - schema = await restService.GetLatestWithMetadataAsync(subject, metadata, ignoreDeletedSchemas).ConfigureAwait(continueOnCapturedContext: false); + schema = await restService.GetLatestWithMetadataAsync(subject, metadata, ignoreDeletedSchemas).ConfigureAwait(continueOnCapturedContext: false); MemoryCacheEntryOptions opts = new MemoryCacheEntryOptions(); if (latestCacheTtlSecs > 0) { @@ -742,4 +742,4 @@ protected virtual void Dispose(bool disposing) } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry/ErrorAction.cs b/src/Confluent.SchemaRegistry/ErrorAction.cs index 598d7522c..f303ffb93 100644 --- a/src/Confluent.SchemaRegistry/ErrorAction.cs +++ b/src/Confluent.SchemaRegistry/ErrorAction.cs @@ -30,12 +30,12 @@ public class ErrorAction : IRuleAction public void Configure(IEnumerable> config) { } - + public string Type() { return ActionType; } - + public Task Run(RuleContext ctx, object message, RuleException exception = null) { string msg = "Rule failed: " + ctx.Rule.Name; diff --git a/src/Confluent.SchemaRegistry/FieldRuleExecutor.cs b/src/Confluent.SchemaRegistry/FieldRuleExecutor.cs index 0be208719..3f316ac5d 100644 --- a/src/Confluent.SchemaRegistry/FieldRuleExecutor.cs +++ b/src/Confluent.SchemaRegistry/FieldRuleExecutor.cs @@ -22,7 +22,7 @@ namespace Confluent.SchemaRegistry public abstract class FieldRuleExecutor : IRuleExecutor { public abstract void Configure(IEnumerable> config); - + public abstract string Type(); public abstract IFieldTransform NewTransform(RuleContext ctx); diff --git a/src/Confluent.SchemaRegistry/IFieldTransform.cs b/src/Confluent.SchemaRegistry/IFieldTransform.cs index 0a51cb2c1..83674422b 100644 --- a/src/Confluent.SchemaRegistry/IFieldTransform.cs +++ b/src/Confluent.SchemaRegistry/IFieldTransform.cs @@ -25,7 +25,7 @@ namespace Confluent.SchemaRegistry public interface IFieldTransform : IDisposable { void Init(RuleContext ctx); - + Task Transform(RuleContext ctx, RuleContext.FieldContext fieldCtx, object fieldValue); } } diff --git a/src/Confluent.SchemaRegistry/IRuleBase.cs b/src/Confluent.SchemaRegistry/IRuleBase.cs index 9086e1e50..9c361f47c 100644 --- a/src/Confluent.SchemaRegistry/IRuleBase.cs +++ b/src/Confluent.SchemaRegistry/IRuleBase.cs @@ -29,7 +29,7 @@ public interface IRuleBase : IDisposable /// /// void Configure(IEnumerable> config); - + /// /// The type of rule executor or action /// diff --git a/src/Confluent.SchemaRegistry/ISchemaRegistryClient.cs b/src/Confluent.SchemaRegistry/ISchemaRegistryClient.cs index 333a21a04..4e0da1692 100644 --- a/src/Confluent.SchemaRegistry/ISchemaRegistryClient.cs +++ b/src/Confluent.SchemaRegistry/ISchemaRegistryClient.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2018 Confluent Inc. +// Copyright 2016-2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/src/Confluent.SchemaRegistry/NoneAction.cs b/src/Confluent.SchemaRegistry/NoneAction.cs index 6d8e02bff..690b71acc 100644 --- a/src/Confluent.SchemaRegistry/NoneAction.cs +++ b/src/Confluent.SchemaRegistry/NoneAction.cs @@ -25,16 +25,16 @@ namespace Confluent.SchemaRegistry public class NoneAction : IRuleAction { public static readonly string ActionType = "NONE"; - + public void Configure(IEnumerable> config) { } - + public string Type() { return ActionType; } - + public Task Run(RuleContext ctx, object message, RuleException exception = null) { return Task.CompletedTask; diff --git a/src/Confluent.SchemaRegistry/ReferenceSubjectNameStrategy.cs b/src/Confluent.SchemaRegistry/ReferenceSubjectNameStrategy.cs index 492f1c11d..5fa1ff82e 100644 --- a/src/Confluent.SchemaRegistry/ReferenceSubjectNameStrategy.cs +++ b/src/Confluent.SchemaRegistry/ReferenceSubjectNameStrategy.cs @@ -42,7 +42,7 @@ public enum ReferenceSubjectNameStrategy /// ReferenceName } - + /// /// Extension methods for the ReferenceSubjectNameStrategy type. diff --git a/src/Confluent.SchemaRegistry/Rest/Authentication/BasicAuthenticationHeaderValueProvider.cs b/src/Confluent.SchemaRegistry/Rest/Authentication/BasicAuthenticationHeaderValueProvider.cs index 9a3b8e286..2639532ab 100644 --- a/src/Confluent.SchemaRegistry/Rest/Authentication/BasicAuthenticationHeaderValueProvider.cs +++ b/src/Confluent.SchemaRegistry/Rest/Authentication/BasicAuthenticationHeaderValueProvider.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2018 Confluent Inc. +// Copyright 2016-2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/Confluent.SchemaRegistry/Rest/Authentication/IAuthenticationHeaderValueProvider.cs b/src/Confluent.SchemaRegistry/Rest/Authentication/IAuthenticationHeaderValueProvider.cs index 940965d9d..86e975260 100644 --- a/src/Confluent.SchemaRegistry/Rest/Authentication/IAuthenticationHeaderValueProvider.cs +++ b/src/Confluent.SchemaRegistry/Rest/Authentication/IAuthenticationHeaderValueProvider.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2018 Confluent Inc. +// Copyright 2016-2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/Compatibility.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/Compatibility.cs index 89d062e76..ecedcb34d 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/Compatibility.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/Compatibility.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -51,7 +51,7 @@ public enum Compatibility /// [EnumMember(Value = "FULL")] Full, - + /// /// Forward transitive schema compatibility. /// diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/CompatibilityCheck.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/CompatibilityCheck.cs index b5ba9e3b5..9b0f03fcf 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/CompatibilityCheck.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/CompatibilityCheck.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ namespace Confluent.SchemaRegistry [DataContract] internal class CompatibilityCheck { - [DataMember(Name="is_compatible")] + [DataMember(Name = "is_compatible")] public bool IsCompatible { get; set; } public CompatibilityCheck(bool isCompatible) diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/ErrorMessage.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/ErrorMessage.cs index 6836bf267..e7609ab69 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/ErrorMessage.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/ErrorMessage.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -36,7 +36,7 @@ public ErrorMessage(int errorCode, string message) ErrorCode = errorCode; Message = message; } - + public override string ToString() => $"{{error_code={ErrorCode}, message={Message}}}"; } diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/Metadata.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/Metadata.cs index 68040278b..26fc037e7 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/Metadata.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/Metadata.cs @@ -1,4 +1,4 @@ -// Copyright 2022 Confluent Inc. +// Copyright 2022 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -37,8 +37,8 @@ public class Metadata : IEquatable /// private Metadata() { } - public Metadata(IDictionary> tags, - IDictionary properties, + public Metadata(IDictionary> tags, + IDictionary properties, ISet sensitive) { Tags = tags; @@ -50,10 +50,10 @@ public bool Equals(Metadata other) { if (ReferenceEquals(null, other)) return false; if (ReferenceEquals(this, other)) return true; - return Utils.DictEquals(Tags, other.Tags) && Utils.DictEquals(Properties, other.Properties) && + return Utils.DictEquals(Tags, other.Tags) && Utils.DictEquals(Properties, other.Properties) && Utils.SetEquals(Sensitive, other.Sensitive); } - + public override bool Equals(object obj) { if (ReferenceEquals(null, obj)) return false; diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/RegisteredSchema.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/RegisteredSchema.cs index c0efb4442..6b3bbe8be 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/RegisteredSchema.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/RegisteredSchema.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -70,7 +70,7 @@ public Schema Schema /// Included to enable API backwards compatibility only, do not use. /// [Obsolete("Included to enable API backwards compatibility. This will be removed in a future release.")] - protected RegisteredSchema() {} + protected RegisteredSchema() { } /// /// Initializes a new instance of this class. 
diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/Rule.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/Rule.cs index 29cc9d16b..8d4817440 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/Rule.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/Rule.cs @@ -1,4 +1,4 @@ -// Copyright 2022 Confluent Inc. +// Copyright 2022 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -25,37 +25,37 @@ public class Rule : IEquatable { [DataMember(Name = "name")] public string Name { get; set; } - + [DataMember(Name = "doc")] public string Doc { get; set; } - + [DataMember(Name = "kind")] public RuleKind Kind { get; set; } - + [DataMember(Name = "mode")] public RuleMode Mode { get; set; } - + [DataMember(Name = "type")] public string Type { get; set; } - + [DataMember(Name = "tags")] public ISet Tags { get; set; } - + [DataMember(Name = "params")] public IDictionary Params { get; set; } - + [DataMember(Name = "expr")] public string Expr { get; set; } - + [DataMember(Name = "onSuccess")] public string OnSuccess { get; set; } - + [DataMember(Name = "onFailure")] public string OnFailure { get; set; } - + [DataMember(Name = "disabled")] public bool Disabled { get; set; } - + /// /// /// Empty constructor for serialization @@ -72,8 +72,8 @@ public Rule(string name, RuleKind kind, RuleMode mode, string type, ISet Tags = tags; Params = parameters; } - - public Rule(string name, RuleKind kind, RuleMode mode, string type, ISet tags, + + public Rule(string name, RuleKind kind, RuleMode mode, string type, ISet tags, IDictionary parameters, string expr, string onSuccess, string onFailure, bool disabled) { Name = name; @@ -93,8 +93,8 @@ public bool Equals(Rule other) if (ReferenceEquals(null, other)) return false; if (ReferenceEquals(this, other)) return true; return Name == other.Name && Doc == other.Doc && Kind == other.Kind && Mode == other.Mode && - Type == other.Type && Utils.SetEquals(Tags, other.Tags) && - Utils.DictEquals(Params, other.Params) && Expr == other.Expr && + Type == other.Type && Utils.SetEquals(Tags, other.Tags) && + Utils.DictEquals(Params, other.Params) && Expr == other.Expr && OnSuccess == other.OnSuccess && OnFailure == other.OnFailure && Disabled == other.Disabled; } diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleKind.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleKind.cs index eb52060f5..ec1e25d6b 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleKind.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleKind.cs @@ -1,4 +1,4 @@ -// Copyright 2022 Confluent Inc. +// Copyright 2022 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleMode.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleMode.cs index 83e29c15d..358f9ae1f 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleMode.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleMode.cs @@ -1,4 +1,4 @@ -// Copyright 2022 Confluent Inc. +// Copyright 2022 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -44,19 +44,19 @@ public enum RuleMode /// [EnumMember(Value = "UPDOWN")] UpDown, - + /// /// A rule used during read (consuming). 
/// [EnumMember(Value = "READ")] Read, - + /// /// A rule used during write (producing). /// [EnumMember(Value = "WRITE")] Write, - + /// /// A rule used during both write and read (producing and consuming). /// diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleSet.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleSet.cs index 822ff74d7..0f7e5c4b9 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleSet.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/RuleSet.cs @@ -1,4 +1,4 @@ -// Copyright 2022 Confluent Inc. +// Copyright 2022 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ public class RuleSet : IEquatable { [DataMember(Name = "migrationRules")] public IList MigrationRules { get; set; } - + [DataMember(Name = "domainRules")] public IList DomainRules { get; set; } @@ -40,46 +40,48 @@ public RuleSet(IList migrationRules, IList domainRules) MigrationRules = migrationRules; DomainRules = domainRules; } - - public bool HasRules(RuleMode mode) { - switch (mode) { - case RuleMode.Upgrade: - case RuleMode.Downgrade: - return MigrationRules.Any(r => r.Mode == mode || r.Mode == RuleMode.UpDown); - case RuleMode.UpDown: - return MigrationRules.Any(r => r.Mode == mode); - case RuleMode.Write: - case RuleMode.Read: - return DomainRules.Any(r => r.Mode == mode || r.Mode == RuleMode.Write); - case RuleMode.WriteRead: - return DomainRules.Any(r => r.Mode == mode); - default: - return false; + + public bool HasRules(RuleMode mode) + { + switch (mode) + { + case RuleMode.Upgrade: + case RuleMode.Downgrade: + return MigrationRules.Any(r => r.Mode == mode || r.Mode == RuleMode.UpDown); + case RuleMode.UpDown: + return MigrationRules.Any(r => r.Mode == mode); + case RuleMode.Write: + case RuleMode.Read: + return DomainRules.Any(r => r.Mode == mode || r.Mode == RuleMode.Write); + case RuleMode.WriteRead: + return DomainRules.Any(r => r.Mode == mode); + default: + return false; + } } - } - public bool Equals(RuleSet other) - { - if (ReferenceEquals(null, other)) return false; - if (ReferenceEquals(this, other)) return true; - return Equals(MigrationRules, other.MigrationRules) && Equals(DomainRules, other.DomainRules); - } + public bool Equals(RuleSet other) + { + if (ReferenceEquals(null, other)) return false; + if (ReferenceEquals(this, other)) return true; + return Equals(MigrationRules, other.MigrationRules) && Equals(DomainRules, other.DomainRules); + } - public override bool Equals(object obj) - { - if (ReferenceEquals(null, obj)) return false; - if (ReferenceEquals(this, obj)) return true; - if (obj.GetType() != this.GetType()) return false; - return Equals((RuleSet)obj); - } + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != this.GetType()) return false; + return Equals((RuleSet)obj); + } - public override int GetHashCode() - { - unchecked + public override int GetHashCode() { - return ((MigrationRules != null ? MigrationRules.GetHashCode() : 0) * 397) ^ - (DomainRules != null ? DomainRules.GetHashCode() : 0); + unchecked + { + return ((MigrationRules != null ? MigrationRules.GetHashCode() : 0) * 397) ^ + (DomainRules != null ? 
DomainRules.GetHashCode() : 0); + } } - } } } diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/Schema.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/Schema.cs index 3b5746a70..25de53f64 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/Schema.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/Schema.cs @@ -19,7 +19,7 @@ using System.Collections.Generic; -namespace Confluent.SchemaRegistry +namespace Confluent.SchemaRegistry { /// /// Represents a schema. @@ -103,7 +103,7 @@ public Schema(string subject, int version, int id, string schemaString) [DataMember(Name = "schemaType")] internal string SchemaType_String { get; set; } - + /// /// Metadata for the schema /// diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaId.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaId.cs index 10940782a..b980b9eb0 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaId.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaId.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,7 @@ using System.Runtime.Serialization; -namespace Confluent.SchemaRegistry +namespace Confluent.SchemaRegistry { [DataContract] internal class SchemaId diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaReference.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaReference.cs index a256a9567..9e00fa8c7 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaReference.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaReference.cs @@ -44,7 +44,7 @@ public class SchemaReference : IComparable, IEquatable /// Initializes a new instance of the SchemaReference class. diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaString.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaString.cs index 27ca15916..077899ddf 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaString.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaString.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,7 @@ using System.Runtime.Serialization; -namespace Confluent.SchemaRegistry +namespace Confluent.SchemaRegistry { [DataContract] internal class SchemaString diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaType.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaType.cs index 522e109bb..245766826 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaType.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/SchemaType.cs @@ -15,7 +15,7 @@ // Refer to LICENSE for more information. -namespace Confluent.SchemaRegistry +namespace Confluent.SchemaRegistry { /// /// Enumerates the types of schema supported by Schema Registry. diff --git a/src/Confluent.SchemaRegistry/Rest/DataContracts/ServerConfig.cs b/src/Confluent.SchemaRegistry/Rest/DataContracts/ServerConfig.cs index b6a1e3d75..487d7968a 100644 --- a/src/Confluent.SchemaRegistry/Rest/DataContracts/ServerConfig.cs +++ b/src/Confluent.SchemaRegistry/Rest/DataContracts/ServerConfig.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,15 +24,15 @@ public class ServerConfig { [DataMember(Name = "compatibility")] public Compatibility CompatibilityLevel { get; } - + public ServerConfig(Compatibility compatibilityLevel) { CompatibilityLevel = compatibilityLevel; } - public override string ToString() + public override string ToString() => $"{{compatibility={CompatibilityLevel}}}"; - + public override bool Equals(object obj) { if (obj == null || GetType() != obj.GetType()) @@ -42,7 +42,7 @@ public override bool Equals(object obj) return CompatibilityLevel == ((ServerConfig)obj).CompatibilityLevel; } - + public override int GetHashCode() => 31 * CompatibilityLevel.GetHashCode(); } diff --git a/src/Confluent.SchemaRegistry/Rest/IRestService.cs b/src/Confluent.SchemaRegistry/Rest/IRestService.cs index 682120dc4..d059cc8a8 100644 --- a/src/Confluent.SchemaRegistry/Rest/IRestService.cs +++ b/src/Confluent.SchemaRegistry/Rest/IRestService.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2018 Confluent Inc. +// Copyright 2016-2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/Confluent.SchemaRegistry/Rest/RestService.cs b/src/Confluent.SchemaRegistry/Rest/RestService.cs index b69559cd6..161abb16f 100644 --- a/src/Confluent.SchemaRegistry/Rest/RestService.cs +++ b/src/Confluent.SchemaRegistry/Rest/RestService.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2018 Confluent Inc. +// Copyright 2016-2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -69,14 +69,16 @@ public RestService(string schemaRegistryUrl, int timeoutMs, { client = new HttpClient(CreateHandler(certificates, enableSslCertificateVerification)) { - BaseAddress = new Uri(uri, UriKind.Absolute), Timeout = TimeSpan.FromMilliseconds(timeoutMs) + BaseAddress = new Uri(uri, UriKind.Absolute), + Timeout = TimeSpan.FromMilliseconds(timeoutMs) }; } else { client = new HttpClient() { - BaseAddress = new Uri(uri, UriKind.Absolute), Timeout = TimeSpan.FromMilliseconds(timeoutMs) + BaseAddress = new Uri(uri, UriKind.Absolute), + Timeout = TimeSpan.FromMilliseconds(timeoutMs) }; } @@ -356,12 +358,12 @@ public async Task GetLatestWithMetadataAsync(string subject, I await RequestAsync($"subjects/{Uri.EscapeDataString(subject)}/metadata?{getKeyValuePairs(metadata)}&deleted={!ignoreDeletedSchemas}", HttpMethod.Get) .ConfigureAwait(continueOnCapturedContext: false)); - + private string getKeyValuePairs(IDictionary metadata) { return string.Join("&", metadata.Select(x => $"key={x.Key}&value={x.Value}")); } - + public async Task RegisterSchemaAsync(string subject, Schema schema, bool normalize) => (await RequestAsync( $"subjects/{Uri.EscapeDataString(subject)}/versions?normalize={normalize}", HttpMethod.Post, @@ -399,12 +401,12 @@ public async Task TestLatestCompatibilityAsync(string subject, Schema sche public async Task UpdateCompatibilityAsync(string subject, Compatibility compatibility) => (await RequestAsync( - string.IsNullOrEmpty(subject) ? "config" : $"config/{Uri.EscapeDataString(subject)}", HttpMethod.Put, + string.IsNullOrEmpty(subject) ? 
"config" : $"config/{Uri.EscapeDataString(subject)}", HttpMethod.Put, new ServerConfig(compatibility)) .ConfigureAwait(continueOnCapturedContext: false)).CompatibilityLevel; public async Task GetCompatibilityAsync(string subject) => (await RequestAsync( - string.IsNullOrEmpty(subject) ? "config" : $"config/{Uri.EscapeDataString(subject)}", HttpMethod.Get) + string.IsNullOrEmpty(subject) ? "config" : $"config/{Uri.EscapeDataString(subject)}", HttpMethod.Get) .ConfigureAwait(continueOnCapturedContext: false)).CompatibilityLevel; @@ -427,4 +429,4 @@ protected virtual void Dispose(bool disposing) } } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry/Rest/Versions.cs b/src/Confluent.SchemaRegistry/Rest/Versions.cs index 930cbcec7..5dffd6006 100644 --- a/src/Confluent.SchemaRegistry/Rest/Versions.cs +++ b/src/Confluent.SchemaRegistry/Rest/Versions.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2018 Confluent Inc. +// Copyright 2016-2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,19 +17,19 @@ using System.Collections.Generic; -namespace Confluent.SchemaRegistry +namespace Confluent.SchemaRegistry { internal static class Versions { public const string SchemaRegistry_V1_JSON = "application/vnd.schemaregistry.v1+json"; public const string SchemaRegistry_Default_JSON = "application/vnd.schemaregistry+json"; public const string JSON = "application/json"; - - public static readonly IReadOnlyList PreferredResponseTypes = new List - { - SchemaRegistry_V1_JSON, - SchemaRegistry_Default_JSON, - JSON + + public static readonly IReadOnlyList PreferredResponseTypes = new List + { + SchemaRegistry_V1_JSON, + SchemaRegistry_Default_JSON, + JSON }; /// diff --git a/src/Confluent.SchemaRegistry/RuleConditionException.cs b/src/Confluent.SchemaRegistry/RuleConditionException.cs index 963a3e6ec..f6e238c7a 100644 --- a/src/Confluent.SchemaRegistry/RuleConditionException.cs +++ b/src/Confluent.SchemaRegistry/RuleConditionException.cs @@ -37,14 +37,14 @@ public RuleConditionException() public RuleConditionException(Rule rule) : base(getErrorMessage(rule)) { } - + private static string getErrorMessage(Rule rule) { string errMsg = rule.Doc; if (string.IsNullOrEmpty(errMsg)) { string expr = rule.Expr; - errMsg = expr != null + errMsg = expr != null ? 
$"Expr failed: '{expr}'" : $"Condition failed: '{rule.Name}'"; } @@ -52,4 +52,4 @@ private static string getErrorMessage(Rule rule) return errMsg; } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry/RuleContext.cs b/src/Confluent.SchemaRegistry/RuleContext.cs index 314bf42ef..d95e1e4e5 100644 --- a/src/Confluent.SchemaRegistry/RuleContext.cs +++ b/src/Confluent.SchemaRegistry/RuleContext.cs @@ -166,4 +166,4 @@ public enum Type Null } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry/RuleException.cs b/src/Confluent.SchemaRegistry/RuleException.cs index 4dc6645ce..f0ad644b1 100644 --- a/src/Confluent.SchemaRegistry/RuleException.cs +++ b/src/Confluent.SchemaRegistry/RuleException.cs @@ -47,4 +47,4 @@ public RuleException(string message, Exception inner) : base(message, inner) { } } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry/RuleRegistry.cs b/src/Confluent.SchemaRegistry/RuleRegistry.cs index 40cc0c2d4..865e66920 100644 --- a/src/Confluent.SchemaRegistry/RuleRegistry.cs +++ b/src/Confluent.SchemaRegistry/RuleRegistry.cs @@ -29,7 +29,7 @@ public static class RuleRegistry private static IDictionary ruleExecutors = new Dictionary(); private static IDictionary ruleActions = new Dictionary(); - + public static void RegisterRuleExecutor(IRuleExecutor executor) { ruleExecutorsMutex.Wait(); @@ -45,7 +45,7 @@ public static void RegisterRuleExecutor(IRuleExecutor executor) ruleExecutorsMutex.Release(); } } - + public static bool TryGetRuleExecutor(string name, out IRuleExecutor executor) { ruleExecutorsMutex.Wait(); @@ -58,7 +58,7 @@ public static bool TryGetRuleExecutor(string name, out IRuleExecutor executor) ruleExecutorsMutex.Release(); } } - + public static List GetRuleExecutors() { ruleExecutorsMutex.Wait(); @@ -71,7 +71,7 @@ public static List GetRuleExecutors() ruleExecutorsMutex.Release(); } } - + public static void RegisterRuleAction(IRuleAction action) { ruleActionsMutex.Wait(); @@ -87,7 +87,7 @@ public static void RegisterRuleAction(IRuleAction action) ruleActionsMutex.Release(); } } - + public static bool TryGetRuleAction(string name, out IRuleAction action) { ruleActionsMutex.Wait(); @@ -100,7 +100,7 @@ public static bool TryGetRuleAction(string name, out IRuleAction action) ruleActionsMutex.Release(); } } - + public static List GetRuleActions() { ruleActionsMutex.Wait(); diff --git a/src/Confluent.SchemaRegistry/SchemaRegistryConfig.cs b/src/Confluent.SchemaRegistry/SchemaRegistryConfig.cs index 568b2b77a..52a612a5c 100644 --- a/src/Confluent.SchemaRegistry/SchemaRegistryConfig.cs +++ b/src/Confluent.SchemaRegistry/SchemaRegistryConfig.cs @@ -209,7 +209,7 @@ public string SslKeystoreLocation { get { return Get(SchemaRegistryConfig.PropertyNames.SslKeystoreLocation); } set { SetObject(SchemaRegistryConfig.PropertyNames.SslKeystoreLocation, value?.ToString()); } - + } /// @@ -458,4 +458,4 @@ public string Get(string key) /// IEnumerator IEnumerable.GetEnumerator() => this.properties.GetEnumerator(); } -} \ No newline at end of file +} diff --git a/src/Confluent.SchemaRegistry/SerdeConfig.cs b/src/Confluent.SchemaRegistry/SerdeConfig.cs index d5328be8c..aef5e191f 100644 --- a/src/Confluent.SchemaRegistry/SerdeConfig.cs +++ b/src/Confluent.SchemaRegistry/SerdeConfig.cs @@ -83,7 +83,7 @@ protected void SetDictionaryProperty(string key, IDictionary val SetObject(key, null); return; } - + var result = string.Join(",", value.Select(kv => $"{kv.Key}={kv.Value}")); SetObject(key, result); } diff --git 
a/src/Confluent.SchemaRegistry/Utils.cs b/src/Confluent.SchemaRegistry/Utils.cs index 5c6e16cfc..7ebacc64a 100644 --- a/src/Confluent.SchemaRegistry/Utils.cs +++ b/src/Confluent.SchemaRegistry/Utils.cs @@ -37,7 +37,7 @@ public static bool DictEquals(IDictionary a, IDictionary> a, IDictionary> b) { if (ReferenceEquals(a, b)) return true; @@ -62,8 +62,8 @@ public static bool SetEquals(ISet a, ISet b) } return true; } - - + + public static bool ListEquals(IList a, IList b) { if (ReferenceEquals(a, b)) return true; diff --git a/src/Confluent.SchemaRegistry/WildcardMatcher.cs b/src/Confluent.SchemaRegistry/WildcardMatcher.cs index 40dfc84d4..157134578 100644 --- a/src/Confluent.SchemaRegistry/WildcardMatcher.cs +++ b/src/Confluent.SchemaRegistry/WildcardMatcher.cs @@ -122,4 +122,4 @@ private static int DoubleSlashes(StringBuilder dst, char[] src, int i) return i; } } -} \ No newline at end of file +} diff --git a/test/Confluent.Kafka.Benchmark/BenchmarkConsumer.cs b/test/Confluent.Kafka.Benchmark/BenchmarkConsumer.cs index 45fe3a615..f26dd30cb 100644 --- a/test/Confluent.Kafka.Benchmark/BenchmarkConsumer.cs +++ b/test/Confluent.Kafka.Benchmark/BenchmarkConsumer.cs @@ -40,7 +40,7 @@ public static void BenchmarkConsumerImpl(string bootstrapServers, string topic, using (var consumer = new ConsumerBuilder(consumerConfig).Build()) { - for (var j=0; j 0) { headers = new Headers(); - for (int i=0; i(config).Build()) { - for (var j=0; j]" : "[Task]")); @@ -80,7 +80,7 @@ private static long BenchmarkProducerImpl( { var autoEvent = new AutoResetEvent(false); var msgCount = nMessages; - Action> deliveryHandler = (DeliveryReport deliveryReport) => + Action> deliveryHandler = (DeliveryReport deliveryReport) => { if (deliveryReport.Error.IsError) { @@ -157,8 +157,8 @@ private static long BenchmarkProducerImpl( var duration = DateTime.Now.Ticks - startTime; - Console.WriteLine($"Produced {nMessages} messages in {duration/10000.0:F0}ms"); - Console.WriteLine($"{nMessages / (duration/10000.0):F0}k msg/s"); + Console.WriteLine($"Produced {nMessages} messages in {duration / 10000.0:F0}ms"); + Console.WriteLine($"{nMessages / (duration / 10000.0):F0}k msg/s"); } producer.Flush(TimeSpan.FromSeconds(10)); diff --git a/test/Confluent.Kafka.Benchmark/Latency.cs b/test/Confluent.Kafka.Benchmark/Latency.cs index 35600ddc7..482411cad 100644 --- a/test/Confluent.Kafka.Benchmark/Latency.cs +++ b/test/Confluent.Kafka.Benchmark/Latency.cs @@ -43,7 +43,8 @@ public static void Run(string bootstrapServers, string topicName, string group, var monitorObj = new Object(); - var consumerTask = Task.Run(() => { + var consumerTask = Task.Run(() => + { // Use middle results only to better estimate steady state performance. var trimStart = (long)(numberOfMessages * 0.05); var trimEnd = (long)(numberOfMessages * 0.95); @@ -65,7 +66,8 @@ public static void Run(string bootstrapServers, string topicName, string group, }; using (var consumer = new ConsumerBuilder(config) - .SetPartitionsAssignedHandler((c, partitions) => { + .SetPartitionsAssignedHandler((c, partitions) => + { // Ensure there is no race between consumer determining start offsets and production starting. 
var initialAssignment = partitions.Select(p => new TopicPartitionOffset(p, c.QueryWatermarkOffsets(p, TimeSpan.FromSeconds(5)).High)).ToList(); if (initialAssignment.Where(p => p.Offset != 0).Count() > 0) @@ -103,7 +105,7 @@ public static void Run(string bootstrapServers, string topicName, string group, results[count++ - trimStart] = latencyMilliSeconds; if (count % (numberOfMessages / 10) == 0) { - Console.WriteLine($"...{(count / (numberOfMessages/10))}0% complete"); + Console.WriteLine($"...{(count / (numberOfMessages / 10))}0% complete"); } } @@ -115,15 +117,16 @@ public static void Run(string bootstrapServers, string topicName, string group, Console.WriteLine( "Latency percentiles (ms) [p50: {0}, p75: {1}, p90: {2}, p95: {3}, p99: {4}]", - results[(int)(results.Length * 50.0/100.0)], - results[(int)(results.Length * 75.0/100.0)], - results[(int)(results.Length * 90.0/100.0)], - results[(int)(results.Length * 95.0/100.0)], - results[(int)(results.Length * 99.0/100.0)]); + results[(int)(results.Length * 50.0 / 100.0)], + results[(int)(results.Length * 75.0 / 100.0)], + results[(int)(results.Length * 90.0 / 100.0)], + results[(int)(results.Length * 95.0 / 100.0)], + results[(int)(results.Length * 99.0 / 100.0)]); }); - var producerTask = Task.Run(() => { + var producerTask = Task.Run(() => + { lock (monitorObj) { Monitor.Wait(monitorObj); } @@ -132,7 +135,7 @@ public static void Run(string bootstrapServers, string topicName, string group, BootstrapServers = bootstrapServers, QueueBufferingMaxMessages = 2000000, MessageSendMaxRetries = 3, - RetryBackoffMs = 500 , + RetryBackoffMs = 500, LingerMs = 5, DeliveryReportFields = "none", EnableIdempotence = true, @@ -146,17 +149,17 @@ public static void Run(string bootstrapServers, string topicName, string group, if (headerCount > 0) { headers = new Headers(); - for (int i=0; i(config).Build()) { var startMilliseconds = sw.ElapsedMilliseconds; - for (int i=0; i { Value = payload, Headers = headers }, dr => { if (dr.Error.Code != ErrorCode.NoError) Console.WriteLine("Message delivery failed: " + dr.Error.Reason); }); - var desiredProduceCount = (elapsedMilliseconds - startMilliseconds)/1000.0 * messagesPerSecond; + var desiredProduceCount = (elapsedMilliseconds - startMilliseconds) / 1000.0 * messagesPerSecond; // Simple, but about as good as we can do assuming a fast enough rate, and a poor Thread.Sleep precision. 
if (i > desiredProduceCount) @@ -179,15 +182,15 @@ public static void Run(string bootstrapServers, string topicName, string group, } } - while (producer.Flush(TimeSpan.FromSeconds(1)) > 0); + while (producer.Flush(TimeSpan.FromSeconds(1)) > 0) ; long elapsedMilliSeconds; - lock (sw) {elapsedMilliSeconds = sw.ElapsedMilliseconds; } + lock (sw) { elapsedMilliSeconds = sw.ElapsedMilliseconds; } Console.WriteLine("Actual throughput: " + (int)Math.Round((numberOfMessages / ((double)(elapsedMilliSeconds - startMilliseconds) / 1000.0))) + " msg/s"); } }); - Task.WaitAll(new [] { producerTask, consumerTask }); + Task.WaitAll(new[] { producerTask, consumerTask }); } } } diff --git a/test/Confluent.Kafka.IntegrationTests/AdminClient_DescribeTopics.cs b/test/Confluent.Kafka.IntegrationTests/AdminClient_DescribeTopics.cs index 3e815dc6d..a732971b8 100644 --- a/test/Confluent.Kafka.IntegrationTests/AdminClient_DescribeTopics.cs +++ b/test/Confluent.Kafka.IntegrationTests/AdminClient_DescribeTopics.cs @@ -52,13 +52,13 @@ public async void AdminClient_DescribeTopics(string bootstrapServers, }).Build()) { var describeOptionsWithTimeout = new Admin.DescribeTopicsOptions() - { - RequestTimeout = TimeSpan.FromSeconds(30), + { + RequestTimeout = TimeSpan.FromSeconds(30), IncludeAuthorizedOperations = false }; var describeOptionsWithAuthOps = new Admin.DescribeTopicsOptions() - { - RequestTimeout = TimeSpan.FromSeconds(30), + { + RequestTimeout = TimeSpan.FromSeconds(30), IncludeAuthorizedOperations = true }; @@ -85,7 +85,7 @@ public async void AdminClient_DescribeTopics(string bootstrapServers, Assert.False(ex.Results.TopicDescriptions[1].Error.IsError); } - var topicListAuthOps = + var topicListAuthOps = TopicCollection.OfTopicNames( new List { @@ -98,7 +98,7 @@ public async void AdminClient_DescribeTopics(string bootstrapServers, Assert.NotEmpty(descResWithAuthOps.TopicDescriptions[0].AuthorizedOperations); Assert.Equal(8, descResWithAuthOps.TopicDescriptions[0].AuthorizedOperations.Count); - var topicACLs = new List + var topicACLs = new List { new AclBinding() { @@ -149,8 +149,8 @@ public async void AdminClient_DescribeTopics(string bootstrapServers, BootstrapServers = bootstrapServers }).Build()) { - descResWithAuthOps = await adminClientUser.DescribeTopicsAsync(topicListAuthOps, - describeOptionsWithAuthOps); + descResWithAuthOps = await adminClientUser.DescribeTopicsAsync(topicListAuthOps, + describeOptionsWithAuthOps); } var descResAuthOps = diff --git a/test/Confluent.Kafka.IntegrationTests/TemporaryTopic.cs b/test/Confluent.Kafka.IntegrationTests/TemporaryTopic.cs index 5b9fac30b..0ba1c6c03 100644 --- a/test/Confluent.Kafka.IntegrationTests/TemporaryTopic.cs +++ b/test/Confluent.Kafka.IntegrationTests/TemporaryTopic.cs @@ -25,11 +25,11 @@ namespace Confluent.Kafka public class TemporaryTopic : IDisposable { private string bootstrapServers; - + public string Name { get; set; } public TemporaryTopic(string bootstrapServers, int numPartitions) - : this("dotnet_test_", bootstrapServers, numPartitions) {} + : this("dotnet_test_", bootstrapServers, numPartitions) { } public TemporaryTopic(string prefix, string bootstrapServers, int numPartitions) { diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AclOperations.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AclOperations.cs index 6a7adfb4b..a9910d9ee 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AclOperations.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AclOperations.cs @@ -129,7 
+129,7 @@ public async void AdminClient_AclOperations(string bootstrapServers) } }; - var validAndInvalidACLs = new List + var validAndInvalidACLs = new List { new AclBinding() { @@ -347,7 +347,7 @@ await adminClient.CreateAclsAsync( var describeAclsResult = await adminClient.DescribeAclsAsync(aclBindingFilters[0], describeAclsOptions); Assert.Equal(new DescribeAclsResult { - AclBindings = new List{} + AclBindings = new List { } }, describeAclsResult); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AlterConfigs.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AlterConfigs.cs index c8f5ec3d0..b872bf21f 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AlterConfigs.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AlterConfigs.cs @@ -73,42 +73,42 @@ public void AdminClient_AlterConfigs(string bootstrapServers) Assert.NotEqual("10001", describeConfigsResult[0].Entries["flush.ms"].Value); // 4. do a valid call, and check that the alteration did correctly happen. - toUpdate = new Dictionary> - { - { configResource, new List { new ConfigEntry { Name = "flush.ms", Value="10011" } } } + toUpdate = new Dictionary> + { + { configResource, new List { new ConfigEntry { Name = "flush.ms", Value="10011" } } } }; adminClient.AlterConfigsAsync(toUpdate); describeConfigsResult = adminClient.DescribeConfigsAsync(new List { configResource }).Result; Assert.Equal("10011", describeConfigsResult[0].Entries["flush.ms"].Value); // 4. test ValidateOnly = true does not update config entry. - toUpdate = new Dictionary> - { - { configResource, new List { new ConfigEntry { Name = "flush.ms", Value="20002" } } } + toUpdate = new Dictionary> + { + { configResource, new List { new ConfigEntry { Name = "flush.ms", Value="20002" } } } }; adminClient.AlterConfigsAsync(toUpdate, new AlterConfigsOptions { ValidateOnly = true }).Wait(); describeConfigsResult = adminClient.DescribeConfigsAsync(new List { configResource }).Result; Assert.Equal("10011", describeConfigsResult[0].Entries["flush.ms"].Value); // 5. test updating broker resource. - toUpdate = new Dictionary> + toUpdate = new Dictionary> { - { + { new ConfigResource { Name = "0", Type = ResourceType.Broker }, new List { new ConfigEntry { Name="num.network.threads", Value="6" } } } }; adminClient.AlterConfigsAsync(toUpdate).Wait(); // Reset to default - toUpdate = new Dictionary> + toUpdate = new Dictionary> { - { + { new ConfigResource { Name = "0", Type = ResourceType.Broker }, new List { new ConfigEntry { Name="num.network.threads", Value="3" } } } }; adminClient.AlterConfigsAsync(toUpdate).Wait(); - + // 6. test updating more than one resource. string topicName2 = Guid.NewGuid().ToString(); adminClient.CreateTopicsAsync( @@ -116,7 +116,7 @@ public void AdminClient_AlterConfigs(string bootstrapServers) Thread.Sleep(TimeSpan.FromSeconds(1)); // without this, sometimes describe topic throws unknown topic/partition error. 
var configResource2 = new ConfigResource { Name = topicName2, Type = ResourceType.Topic }; - toUpdate = new Dictionary> + toUpdate = new Dictionary> { { configResource, new List { new ConfigEntry { Name = "flush.ms", Value="222" } } }, { configResource2, new List { new ConfigEntry { Name = "flush.ms", Value="333" } } } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AlterListConsumerGroupOffsets.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AlterListConsumerGroupOffsets.cs index 1c5c1bf8e..a7cf6b437 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AlterListConsumerGroupOffsets.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_AlterListConsumerGroupOffsets.cs @@ -39,7 +39,7 @@ public void AdminClient_AlterListConsumerGroupOffsets(string bootstrapServers) var numMessages = 5; var groupID = Guid.NewGuid().ToString(); - using(var topic = new TemporaryTopic(bootstrapServers, 1)) + using (var topic = new TemporaryTopic(bootstrapServers, 1)) { // This test needs us to first produce and consume from a topic before we can list the offsets. // 1. Create topic and produce @@ -64,7 +64,8 @@ public void AdminClient_AlterListConsumerGroupOffsets(string bootstrapServers) // Create an AdminClient here - to test alter while the consumer is still active. - var adminClient = new AdminClientBuilder(new AdminClientConfig { + var adminClient = new AdminClientBuilder(new AdminClientConfig + { BootstrapServers = bootstrapServers, }).Build(); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_CreatePartitions.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_CreatePartitions.cs index 342f4e942..cf1b11b25 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_CreatePartitions.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_CreatePartitions.cs @@ -53,7 +53,7 @@ public void AdminClient_CreatePartitions(string bootstrapServers) var dr1 = producer.ProduceAsync(new TopicPartition(topicName1, 0), new Message()).Result; var dr2 = producer.ProduceAsync(new TopicPartition(topicName1, 1), new Message()).Result; - + try { producer.ProduceAsync(new TopicPartition(topicName1, 2), new Message()).Wait(); @@ -61,8 +61,8 @@ public void AdminClient_CreatePartitions(string bootstrapServers) } catch (AggregateException ex) { - Assert.IsType>(ex.InnerException); - Assert.True(((ProduceException)ex.InnerException).Error.IsError); + Assert.IsType>(ex.InnerException); + Assert.True(((ProduceException)ex.InnerException).Error.IsError); } } @@ -83,8 +83,8 @@ public void AdminClient_CreatePartitions(string bootstrapServers) } catch (AggregateException ex) { - Assert.IsType>(ex.InnerException); - Assert.True(((ProduceException)ex.InnerException).Error.IsError); + Assert.IsType>(ex.InnerException); + Assert.True(((ProduceException)ex.InnerException).Error.IsError); } } @@ -94,10 +94,10 @@ public void AdminClient_CreatePartitions(string bootstrapServers) { adminClient.CreateTopicsAsync(new TopicSpecification[] { new TopicSpecification { Name = topicName3, NumPartitions = 1, ReplicationFactor = 1 } }).Wait(); adminClient.CreatePartitionsAsync( - new List + new List { - new PartitionsSpecification { Topic = topicName2, IncreaseTo = 2, ReplicaAssignments = new List> { new List { 0 } } } - }, + new PartitionsSpecification { Topic = topicName2, IncreaseTo = 2, ReplicaAssignments = new List> { new List { 0 } } } + }, new CreatePartitionsOptions { ValidateOnly = true } ).Wait(); } @@ -111,10 +111,10 @@ public void 
AdminClient_CreatePartitions(string bootstrapServers) try { adminClient.CreatePartitionsAsync( - new List + new List { - new PartitionsSpecification { Topic = topicName2, IncreaseTo = 2, ReplicaAssignments = new List> { new List { 42 } } } - }, + new PartitionsSpecification { Topic = topicName2, IncreaseTo = 2, ReplicaAssignments = new List> { new List { 42 } } } + }, new CreatePartitionsOptions { ValidateOnly = true } ).Wait(); Assert.True(false, "Expecting exception"); @@ -131,8 +131,8 @@ public void AdminClient_CreatePartitions(string bootstrapServers) // more than one. using (var adminClient = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build()) { - adminClient.CreateTopicsAsync(new TopicSpecification[] - { + adminClient.CreateTopicsAsync(new TopicSpecification[] + { new TopicSpecification { Name = topicName5, NumPartitions = 1, ReplicationFactor = 1 }, new TopicSpecification { Name = topicName6, NumPartitions = 1, ReplicationFactor = 1 } } @@ -141,7 +141,7 @@ public void AdminClient_CreatePartitions(string bootstrapServers) // just a simple check there wasn't an exception. adminClient.CreatePartitionsAsync( - new List + new List { new PartitionsSpecification { Topic = topicName5, IncreaseTo = 2 }, new PartitionsSpecification { Topic = topicName6, IncreaseTo = 3 } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_CreateTopics.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_CreateTopics.cs index 5a96123e4..229388c9a 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_CreateTopics.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_CreateTopics.cs @@ -49,7 +49,7 @@ public void AdminClient_CreateTopics(string bootstrapServers) { adminClient.CreateTopicsAsync( new TopicSpecification[] - { + { new TopicSpecification { Name = topicName1, NumPartitions = 2, ReplicationFactor = 1 }, new TopicSpecification { Name = topicName2, NumPartitions = 12, ReplicationFactor = 1 } } @@ -69,7 +69,7 @@ public void AdminClient_CreateTopics(string bootstrapServers) var deliveryReport1 = producer.ProduceAsync(topicName1, new Message()).Result; var deliveryReport2 = producer.ProduceAsync(topicName2, new Message()).Result; var deliveryReport3 = producer.ProduceAsync(topicName3, new Message()).Result; - + Assert.Equal(topicName1, deliveryReport1.Topic); Assert.Equal(topicName2, deliveryReport2.Topic); Assert.Equal(topicName3, deliveryReport3.Topic); @@ -82,8 +82,8 @@ public void AdminClient_CreateTopics(string bootstrapServers) { try { - adminClient.CreateTopicsAsync(new List - { + adminClient.CreateTopicsAsync(new List + { new TopicSpecification { Name = topicName3, NumPartitions = 1, ReplicationFactor = 1 }, new TopicSpecification { Name = topicName4, NumPartitions = 1, ReplicationFactor = 1 } } @@ -96,7 +96,7 @@ public void AdminClient_CreateTopics(string bootstrapServers) catch (AggregateException ex) { Assert.True(ex.InnerException.GetType() == typeof(CreateTopicsException)); - var cte = (CreateTopicsException) ex.InnerException; + var cte = (CreateTopicsException)ex.InnerException; Assert.Equal(2, cte.Results.Count); Assert.Single(cte.Results.Where(r => r.Error.IsError)); Assert.Single(cte.Results.Where(r => !r.Error.IsError)); @@ -110,7 +110,7 @@ public void AdminClient_CreateTopics(string bootstrapServers) using (var adminClient = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build()) { adminClient.CreateTopicsAsync( - new List { new TopicSpecification { Name = 
topicName5, NumPartitions = 1, ReplicationFactor = 1 } }, + new List { new TopicSpecification { Name = topicName5, NumPartitions = 1, ReplicationFactor = 1 } }, new CreateTopicsOptions { ValidateOnly = true, RequestTimeout = TimeSpan.FromSeconds(30) } ).Wait(); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteConsumerGroup.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteConsumerGroup.cs index 296454a52..f4fe157b4 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteConsumerGroup.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteConsumerGroup.cs @@ -1,94 +1,94 @@ -// Copyright 2022 Confluent Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Refer to LICENSE for more information. - -using Confluent.Kafka.Admin; -using System; -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using Xunit; -using Confluent.Kafka.TestsCommon; - - -namespace Confluent.Kafka.IntegrationTests -{ - public partial class Tests - { - [Theory, MemberData(nameof(KafkaParameters))] - public void AdminClient_DeleteConsumerGroup(string bootstrapServers) - { - LogToFile("start AdminClient_DeleteConsumerGroup"); - - var groupId = Guid.NewGuid().ToString(); - var groupId2 = Guid.NewGuid().ToString(); - var groupId3 = Guid.NewGuid().ToString(); - using var topic = new TemporaryTopic(bootstrapServers, 1); - Util.ProduceNullStringMessages(bootstrapServers, topic.Name, 1, 1); - - using (var admin = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build()) - { - // test single delete group - CreateConsumer(bootstrapServers, groupId, topic.Name); - - admin.DeleteGroupsAsync(new List { groupId }, new DeleteGroupsOptions()).Wait(); - - var groups = admin.ListGroups(TimeSpan.FromSeconds(5)); - Assert.DoesNotContain(groups, (group) => group.Group == groupId); - - // test - // - delete two groups, one that doesn't exist. - CreateConsumer(bootstrapServers, groupId2, topic.Name); - - try - { - admin.DeleteGroupsAsync(new List {groupId2, groupId3}, new DeleteGroupsOptions()).Wait(); - Assert.True(false); // expecting exception. 
- } - catch (AggregateException ex) - { - var dge = (DeleteGroupsException)ex.InnerException; - Assert.Equal(2, dge.Results.Count); - Assert.Single(dge.Results.Where(r => r.Error.IsError)); - Assert.Single(dge.Results.Where(r => !r.Error.IsError)); - Assert.Equal(groupId2, dge.Results.Where(r => !r.Error.IsError).First().Group); - Assert.Equal(groupId3, dge.Results.Where(r => r.Error.IsError).First().Group); - } - }; - - Assert.Equal(0, Library.HandleCount); - LogToFile("end AdminClient_DeleteConsumerGroup"); - } - - private static void CreateConsumer(string bootstrapServers, string groupId, string topic) - { - using var consumer = new TestConsumerBuilder(new ConsumerConfig - { - BootstrapServers = bootstrapServers, - GroupId = groupId, - EnableAutoCommit = false, - AutoOffsetReset = AutoOffsetReset.Earliest - }).Build(); - - consumer.Subscribe(topic); - - var cr = consumer.Consume(TimeSpan.FromSeconds(10)); - - consumer.Commit(cr); - - consumer.Close(); - } - } -} +// Copyright 2022 Confluent Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Refer to LICENSE for more information. + +using Confluent.Kafka.Admin; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using Xunit; +using Confluent.Kafka.TestsCommon; + + +namespace Confluent.Kafka.IntegrationTests +{ + public partial class Tests + { + [Theory, MemberData(nameof(KafkaParameters))] + public void AdminClient_DeleteConsumerGroup(string bootstrapServers) + { + LogToFile("start AdminClient_DeleteConsumerGroup"); + + var groupId = Guid.NewGuid().ToString(); + var groupId2 = Guid.NewGuid().ToString(); + var groupId3 = Guid.NewGuid().ToString(); + using var topic = new TemporaryTopic(bootstrapServers, 1); + Util.ProduceNullStringMessages(bootstrapServers, topic.Name, 1, 1); + + using (var admin = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build()) + { + // test single delete group + CreateConsumer(bootstrapServers, groupId, topic.Name); + + admin.DeleteGroupsAsync(new List { groupId }, new DeleteGroupsOptions()).Wait(); + + var groups = admin.ListGroups(TimeSpan.FromSeconds(5)); + Assert.DoesNotContain(groups, (group) => group.Group == groupId); + + // test + // - delete two groups, one that doesn't exist. + CreateConsumer(bootstrapServers, groupId2, topic.Name); + + try + { + admin.DeleteGroupsAsync(new List { groupId2, groupId3 }, new DeleteGroupsOptions()).Wait(); + Assert.True(false); // expecting exception. 
+ } + catch (AggregateException ex) + { + var dge = (DeleteGroupsException)ex.InnerException; + Assert.Equal(2, dge.Results.Count); + Assert.Single(dge.Results.Where(r => r.Error.IsError)); + Assert.Single(dge.Results.Where(r => !r.Error.IsError)); + Assert.Equal(groupId2, dge.Results.Where(r => !r.Error.IsError).First().Group); + Assert.Equal(groupId3, dge.Results.Where(r => r.Error.IsError).First().Group); + } + }; + + Assert.Equal(0, Library.HandleCount); + LogToFile("end AdminClient_DeleteConsumerGroup"); + } + + private static void CreateConsumer(string bootstrapServers, string groupId, string topic) + { + using var consumer = new TestConsumerBuilder(new ConsumerConfig + { + BootstrapServers = bootstrapServers, + GroupId = groupId, + EnableAutoCommit = false, + AutoOffsetReset = AutoOffsetReset.Earliest + }).Build(); + + consumer.Subscribe(topic); + + var cr = consumer.Consume(TimeSpan.FromSeconds(10)); + + consumer.Commit(cr); + + consumer.Close(); + } + } +} diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteConsumerGroupOffsets.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteConsumerGroupOffsets.cs index 7f29963f2..ad11e717b 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteConsumerGroupOffsets.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteConsumerGroupOffsets.cs @@ -43,7 +43,7 @@ public void AdminClient_DeleteConsumerGroupOffsets(string bootstrapServers) .SetPartitionsAssignedHandler((c, partitions) => { assignmentDone = true; - Assert.Equal(1, partitions.Count()); + Assert.Single(partitions); Assert.Equal(0, partitions[0].Partition.Value); Assert.Equal(topic1.Name, partitions[0].Topic); }).Build()) @@ -55,17 +55,17 @@ public void AdminClient_DeleteConsumerGroupOffsets(string bootstrapServers) consumer1.Commit(new List() { new TopicPartitionOffset(topic1.Name, 0, offsetToCommit) }); // commit some offset for consumer var committedOffsets = consumer1.Committed(TimeSpan.FromSeconds(10)); - Assert.Equal(1, committedOffsets.Count); + Assert.Single(committedOffsets); Assert.Equal(offsetToCommit, committedOffsets[0].Offset); List topicPartitionToReset = new List() { new TopicPartition(topic1.Name, 0) }; var res = adminClient.DeleteConsumerGroupOffsetsAsync(groupId1, topicPartitionToReset).Result; Assert.Equal(groupId1, res.Group); - Assert.Equal(1, res.Partitions.Count); + Assert.Single(res.Partitions); Assert.Equal(0, res.Partitions[0].Partition.Value); committedOffsets = consumer1.Committed(TimeSpan.FromSeconds(1)); - Assert.Equal(1, committedOffsets.Count); + Assert.Single(committedOffsets); Assert.Equal(Offset.Unset, committedOffsets[0].Offset); // Ensure consumer is actively subscribed to the topic @@ -79,7 +79,7 @@ public void AdminClient_DeleteConsumerGroupOffsets(string bootstrapServers) consumer1.Commit(new List() { new TopicPartitionOffset(topic1.Name, 0, offsetToCommit) }); // commit some offset for consumer committedOffsets = consumer1.Committed(TimeSpan.FromSeconds(10)); - Assert.Equal(1, committedOffsets.Count); + Assert.Single(committedOffsets); Assert.Equal(offsetToCommit, committedOffsets[0].Offset); topicPartitionToReset = new List() { new TopicPartition(topic1.Name, 0) }; @@ -93,12 +93,12 @@ public void AdminClient_DeleteConsumerGroupOffsets(string bootstrapServers) var dcgoe = (DeleteConsumerGroupOffsetsException)ex.InnerException; Assert.Equal(ErrorCode.Local_Partial, dcgoe.Error.Code); Assert.Equal(groupId1, dcgoe.Result.Group); - Assert.Equal(1, 
dcgoe.Result.Partitions.Count); + Assert.Single(dcgoe.Result.Partitions); Assert.Equal(0, dcgoe.Result.Partitions[0].Partition.Value); } committedOffsets = consumer1.Committed(TimeSpan.FromSeconds(1)); - Assert.Equal(1, committedOffsets.Count); + Assert.Single(committedOffsets); Assert.Equal(offsetToCommit, committedOffsets[0].Offset); // offset is unchanged as the consumer is actively subscribed to the topic consumer1.Unsubscribe(); @@ -107,17 +107,17 @@ public void AdminClient_DeleteConsumerGroupOffsets(string bootstrapServers) consumer1.Assign(new List() { new TopicPartition(topic2.Name, 0) }); committedOffsets = consumer1.Committed(TimeSpan.FromSeconds(1)); - Assert.Equal(1, committedOffsets.Count); + Assert.Single(committedOffsets); Assert.Equal(Offset.Unset, committedOffsets[0].Offset); topicPartitionToReset = new List() { new TopicPartition(topic2.Name, 0) }; res = adminClient.DeleteConsumerGroupOffsetsAsync(groupId1, topicPartitionToReset).Result; Assert.Equal(groupId1, res.Group); - Assert.Equal(1, res.Partitions.Count); + Assert.Single(res.Partitions); Assert.Equal(0, res.Partitions[0].Partition.Value); committedOffsets = consumer1.Committed(TimeSpan.FromSeconds(1)); - Assert.Equal(1, committedOffsets.Count); + Assert.Single(committedOffsets); Assert.Equal(Offset.Unset, committedOffsets[0].Offset); // offsets are unchaged after the reset // Resetting offset for only one partiton in a multi partition topic @@ -133,7 +133,7 @@ public void AdminClient_DeleteConsumerGroupOffsets(string bootstrapServers) topicPartitionToReset = new List() { new TopicPartition(topic3.Name, 0) }; res = adminClient.DeleteConsumerGroupOffsetsAsync(groupId1, topicPartitionToReset).Result; Assert.Equal(groupId1, res.Group); - Assert.Equal(1, res.Partitions.Count); + Assert.Single(res.Partitions); Assert.Equal(0, res.Partitions[0].Partition.Value); committedOffsets = consumer1.Committed(TimeSpan.FromSeconds(1)); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteRecords.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteRecords.cs index e11729bb0..f6c82b1d4 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteRecords.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteRecords.cs @@ -41,7 +41,7 @@ public void AdminClient_DeleteRecords(string bootstrapServers) using (var adminClient = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build()) using (var consumer = new TestConsumerBuilder(new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = "unimportant" }).Build()) { - for (int i=0; i<10; ++i) + for (int i = 0; i < 10; ++i) { producer.Produce(topic1.Name, new Message { Value = i.ToString() }); producer.Produce(topic2.Name, new Message { Value = i.ToString() }); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteTopics.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteTopics.cs index 90d0871d7..53098532e 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteTopics.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DeleteTopics.cs @@ -39,7 +39,7 @@ public void AdminClient_DeleteTopics(string bootstrapServers) var topicName1 = Guid.NewGuid().ToString(); var topicName2 = Guid.NewGuid().ToString(); var topicName3 = Guid.NewGuid().ToString(); - + // test single delete topic. 
using (var adminClient = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build()) { @@ -70,7 +70,7 @@ public void AdminClient_DeleteTopics(string bootstrapServers) } catch (AggregateException ex) { - var dte = (DeleteTopicsException) ex.InnerException; + var dte = (DeleteTopicsException)ex.InnerException; Assert.Equal(2, dte.Results.Count); Assert.Single(dte.Results.Where(r => r.Error.IsError)); Assert.Single(dte.Results.Where(r => !r.Error.IsError)); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DescribeCluster.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DescribeCluster.cs index b0f08a76b..3d531e8ce 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DescribeCluster.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DescribeCluster.cs @@ -52,8 +52,8 @@ public async void AdminClient_DescribeCluster(string bootstrapServers, }).Build()) { var listOptionsWithTimeout = new Admin.ListConsumerGroupsOptions() { RequestTimeout = TimeSpan.FromSeconds(30) }; - var describeOptionsWithTimeout = new Admin.DescribeClusterOptions() { RequestTimeout = TimeSpan.FromSeconds(30) , IncludeAuthorizedOperations = false}; - var describeOptionsWithAuthOps = new Admin.DescribeClusterOptions() { RequestTimeout = TimeSpan.FromSeconds(30) , IncludeAuthorizedOperations = true}; + var describeOptionsWithTimeout = new Admin.DescribeClusterOptions() { RequestTimeout = TimeSpan.FromSeconds(30), IncludeAuthorizedOperations = false }; + var describeOptionsWithAuthOps = new Admin.DescribeClusterOptions() { RequestTimeout = TimeSpan.FromSeconds(30), IncludeAuthorizedOperations = true }; var descResult = await adminClient.DescribeClusterAsync(describeOptionsWithTimeout); @@ -62,8 +62,8 @@ public async void AdminClient_DescribeCluster(string bootstrapServers, descResult = await adminClient.DescribeClusterAsync(describeOptionsWithAuthOps); Assert.Equal(7, descResult.AuthorizedOperations.Count); - - var clusterACLs = new List + + var clusterACLs = new List { new AclBinding() { @@ -91,7 +91,7 @@ public async void AdminClient_DescribeCluster(string bootstrapServers, RequestTimeout = TimeSpan.FromSeconds(30) }; await adminClient.CreateAclsAsync(clusterACLs, createAclsOptions); - + using (var adminClientUser = new AdminClientBuilder(new AdminClientConfig { SecurityProtocol = SecurityProtocol.SaslPlaintext, @@ -101,7 +101,7 @@ public async void AdminClient_DescribeCluster(string bootstrapServers, BootstrapServers = bootstrapServers }).Build()) { - descResult = await adminClientUser.DescribeClusterAsync(describeOptionsWithAuthOps); + descResult = await adminClientUser.DescribeClusterAsync(describeOptionsWithAuthOps); } Assert.NotEmpty(descResult.Nodes); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DescribeConfigs.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DescribeConfigs.cs index 0e0442d33..05c719f73 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DescribeConfigs.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_DescribeConfigs.cs @@ -53,7 +53,7 @@ public void AdminClient_DescribeConfigs(string bootstrapServers) // topic configs, more than one. 
// --- - results = adminClient.DescribeConfigsAsync(new List { + results = adminClient.DescribeConfigsAsync(new List { new ConfigResource { Name = singlePartitionTopic, Type = ResourceType.Topic }, new ConfigResource { Name = partitionedTopic, Type = ResourceType.Topic } }).Result; @@ -87,7 +87,7 @@ public void AdminClient_DescribeConfigs(string bootstrapServers) try { results = adminClient.DescribeConfigsAsync( - new List + new List { new ConfigResource { Name="invalid.name.for.resource", Type = ResourceType.Broker } } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_IncrementalAlterConfigs.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_IncrementalAlterConfigs.cs index cc22b7cb5..9f0a0bf7f 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_IncrementalAlterConfigs.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_IncrementalAlterConfigs.cs @@ -74,15 +74,15 @@ public void AdminClient_IncrementalAlterConfigs(string bootstrapServers) Assert.NotEqual("delete,compact", describeConfigsResult[0].Entries["cleanup.policy"].Value); // 4. do a valid call, and check that the alteration did correctly happen. - toUpdate = new Dictionary> - { - { + toUpdate = new Dictionary> + { + { configResource, new List { new ConfigEntry { Name = "flush.ms", Value = "10001", IncrementalOperation = AlterConfigOpType.Set }, - new ConfigEntry { Name = "cleanup.policy", Value = "compact", IncrementalOperation = AlterConfigOpType.Append } - } - } + new ConfigEntry { Name = "cleanup.policy", Value = "compact", IncrementalOperation = AlterConfigOpType.Append } + } + } }; adminClient.IncrementalAlterConfigsAsync(toUpdate); Thread.Sleep(TimeSpan.FromMilliseconds(200)); @@ -91,9 +91,9 @@ public void AdminClient_IncrementalAlterConfigs(string bootstrapServers) Assert.Equal("delete,compact", describeConfigsResult[0].Entries["cleanup.policy"].Value); // 4. test ValidateOnly = true does not update config entry. - toUpdate = new Dictionary> - { - { configResource, new List { new ConfigEntry { Name = "flush.ms", Value = "20002" , IncrementalOperation = AlterConfigOpType.Set } } } + toUpdate = new Dictionary> + { + { configResource, new List { new ConfigEntry { Name = "flush.ms", Value = "20002" , IncrementalOperation = AlterConfigOpType.Set } } } }; adminClient.IncrementalAlterConfigsAsync(toUpdate, new IncrementalAlterConfigsOptions { ValidateOnly = true }).Wait(); Thread.Sleep(TimeSpan.FromMilliseconds(200)); @@ -101,18 +101,18 @@ public void AdminClient_IncrementalAlterConfigs(string bootstrapServers) Assert.Equal("10001", describeConfigsResult[0].Entries["flush.ms"].Value); // 5. test updating broker resource. - toUpdate = new Dictionary> + toUpdate = new Dictionary> { - { + { new ConfigResource { Name = "0", Type = ResourceType.Broker }, new List { new ConfigEntry { Name = "num.network.threads", Value = "6" , IncrementalOperation = AlterConfigOpType.Set } } } }; adminClient.IncrementalAlterConfigsAsync(toUpdate).Wait(); - + // 6. test updating more than one resource. 
var configResource2 = new ConfigResource { Name = topicName2, Type = ResourceType.Topic }; - toUpdate = new Dictionary> + toUpdate = new Dictionary> { { configResource, new List { new ConfigEntry { Name = "flush.ms", Value = "222" , IncrementalOperation = AlterConfigOpType.Set } } }, { configResource2, new List { new ConfigEntry { Name = "flush.ms", Value = "333" , IncrementalOperation = AlterConfigOpType.Set } } } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListDescribeConsumerGroups.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListDescribeConsumerGroups.cs index 67251d01a..9e596cae2 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListDescribeConsumerGroups.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListDescribeConsumerGroups.cs @@ -77,8 +77,10 @@ public void AdminClient_ListDescribeConsumerGroups(string bootstrapServers) const string clientID2 = "test.client.2"; // Create an AdminClient here - we need it throughout the test. - using (var adminClient = new AdminClientBuilder(new AdminClientConfig { - BootstrapServers = bootstrapServers }).Build()) + using (var adminClient = new AdminClientBuilder(new AdminClientConfig + { + BootstrapServers = bootstrapServers + }).Build()) { var listOptionsWithTimeout = new Admin.ListConsumerGroupsOptions() { RequestTimeout = TimeSpan.FromSeconds(30) }; var describeOptionsWithTimeout = new Admin.DescribeConsumerGroupsOptions() @@ -169,8 +171,10 @@ public void AdminClient_ListDescribeConsumerGroups(string bootstrapServers) // Check the 'States' option by listing Stable consumer groups, which shouldn't // include `groupID`. groups = adminClient.ListConsumerGroupsAsync(new Admin.ListConsumerGroupsOptions() - { MatchStates = new List() { ConsumerGroupState.Stable }, - RequestTimeout = TimeSpan.FromSeconds(30) }).Result; + { + MatchStates = new List() { ConsumerGroupState.Stable }, + RequestTimeout = TimeSpan.FromSeconds(30) + }).Result; Assert.Empty(groups.Valid.Where(group => group.GroupId == groupID)); descResult = adminClient.DescribeConsumerGroupsAsync( diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListGroups.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListGroups.cs index 3943a5eff..96cdd2ccd 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListGroups.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListGroups.cs @@ -51,7 +51,7 @@ public void AdminClient_ListGroups(string bootstrapServers) for (var i = 0; i < 10; i++) { using var consumer = new TestConsumerBuilder(consumerConfig).Build(); - + consumer.Subscribe(topic.Name); Task.Delay(TimeSpan.FromSeconds(1)).Wait(); @@ -59,8 +59,8 @@ public void AdminClient_ListGroups(string bootstrapServers) Assert.NotNull(info); Assert.Equal(i + 1, info.Members.Count); } - + LogToFile("end AdminClient_ListGroups"); } } -} \ No newline at end of file +} diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListOffsets.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListOffsets.cs index dee273e04..5f1189bed 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListOffsets.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_ListOffsets.cs @@ -30,19 +30,19 @@ public partial class Tests public async void AdminClient_ListOffsets(string bootstrapServers) { LogToFile("start AdminClient_ListOffsets"); - + using var topic = new TemporaryTopic(bootstrapServers, 1); using var producer = new TestProducerBuilder(new 
ProducerConfig { BootstrapServers = bootstrapServers }).Build(); using var adminClient = new AdminClientBuilder(new AdminClientConfig { BootstrapServers = bootstrapServers }).Build(); - + long basetimestamp = 10000000; - await producer.ProduceAsync(topic.Name, new Message { Value = "Producer Message", Timestamp = new Timestamp(basetimestamp + 100, TimestampType.CreateTime)}); - await producer.ProduceAsync(topic.Name, new Message { Value = "Producer Message", Timestamp = new Timestamp(basetimestamp + 400, TimestampType.CreateTime)}); - await producer.ProduceAsync(topic.Name, new Message { Value = "Producer Message", Timestamp = new Timestamp(basetimestamp + 250, TimestampType.CreateTime)}); + await producer.ProduceAsync(topic.Name, new Message { Value = "Producer Message", Timestamp = new Timestamp(basetimestamp + 100, TimestampType.CreateTime) }); + await producer.ProduceAsync(topic.Name, new Message { Value = "Producer Message", Timestamp = new Timestamp(basetimestamp + 400, TimestampType.CreateTime) }); + await producer.ProduceAsync(topic.Name, new Message { Value = "Producer Message", Timestamp = new Timestamp(basetimestamp + 250, TimestampType.CreateTime) }); producer.Flush(new TimeSpan(0, 0, 10)); - + var timeout = TimeSpan.FromSeconds(30); - ListOffsetsOptions options = new ListOffsetsOptions(){RequestTimeout = timeout, IsolationLevel = IsolationLevel.ReadUncommitted}; + ListOffsetsOptions options = new ListOffsetsOptions() { RequestTimeout = timeout, IsolationLevel = IsolationLevel.ReadUncommitted }; var testFixtures = new List> { @@ -51,7 +51,7 @@ public async void AdminClient_ListOffsets(string bootstrapServers) Tuple.Create(OffsetSpec.MaxTimestamp(), new Offset(1)), Tuple.Create(OffsetSpec.ForTimestamp(basetimestamp + 150), new Offset(1)), }; - + foreach (var fixture in testFixtures) { var offsetSpec = fixture.Item1; @@ -64,7 +64,7 @@ public async void AdminClient_ListOffsets(string bootstrapServers) OffsetSpec = offsetSpec } }; - + var listOffsetsResult = await adminClient.ListOffsetsAsync(topicPartitionOffsetSpecs, options); foreach (var resultInfo in listOffsetsResult.ResultInfos) diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_NullReferenceChecks.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_NullReferenceChecks.cs index 44e06a0f0..99fe23754 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_NullReferenceChecks.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_NullReferenceChecks.cs @@ -116,7 +116,7 @@ public void AdminClient_NullReferenceChecks(string bootstrapServers) adminClient.DeleteTopicsAsync(new List { topicName1, nullTopic }); Assert.True(false, "Expected exception."); } - catch(ArgumentException ex) + catch (ArgumentException ex) { Assert.Contains("topic", ex.Message); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_UserScram.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_UserScram.cs index c5fcc7872..b8c804892 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_UserScram.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AdminClient_UserScram.cs @@ -46,7 +46,7 @@ public async void AdminClient_UserScramCredentials(string bootstrapServers) var alterOptions = new AlterUserScramCredentialsOptions() { RequestTimeout = timeout }; List descriptions; - + try { await adminClient.DescribeUserScramCredentialsAsync(users, describeOptions); @@ -58,7 +58,7 @@ public async void AdminClient_UserScramCredentials(string bootstrapServers) foreach (var 
description in descriptions) { Assert.Equal(users[0], description.User); - Assert.Equal(ErrorCode.ResourceNotFound,description.Error.Code); + Assert.Equal(ErrorCode.ResourceNotFound, description.Error.Code); } } @@ -82,8 +82,9 @@ public async void AdminClient_UserScramCredentials(string bootstrapServers) { Assert.Equal(users[0], description.User); Assert.Equal(ErrorCode.NoError, description.Error.Code); - foreach(var credentialinfo in description.ScramCredentialInfos){ - Assert.Equal(15000,credentialinfo.Iterations); + foreach (var credentialinfo in description.ScramCredentialInfos) + { + Assert.Equal(15000, credentialinfo.Iterations); Assert.Equal(ScramMechanism.ScramSha256, credentialinfo.Mechanism); } } @@ -108,7 +109,7 @@ public async void AdminClient_UserScramCredentials(string bootstrapServers) foreach (var description in descriptions) { Assert.Equal(users[0], description.User); - Assert.Equal(ErrorCode.ResourceNotFound,description.Error.Code); + Assert.Equal(ErrorCode.ResourceNotFound, description.Error.Code); } } } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AssignOverloads.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AssignOverloads.cs index cf8493bb2..fa7905d29 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AssignOverloads.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AssignOverloads.cs @@ -65,7 +65,7 @@ public void AssignOverloads(string bootstrapServers) var cr = consumer.Consume(TimeSpan.FromSeconds(10)); consumer.Commit(); Assert.Equal(cr.Message.Value, testString); - + // Determine offset to consume from automatically. consumer.Assign(new List() { dr.TopicPartition }); cr = consumer.Consume(TimeSpan.FromSeconds(10)); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/AssignPastEnd.cs b/test/Confluent.Kafka.IntegrationTests/Tests/AssignPastEnd.cs index 490589b4c..2a851ff08 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/AssignPastEnd.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/AssignPastEnd.cs @@ -57,10 +57,10 @@ public void AssignPastEnd(string bootstrapServers) using (var consumer = new TestConsumerBuilder(consumerConfig).Build()) { ConsumeResult record; - consumer.Assign(new List() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset+1) }); + consumer.Assign(new List() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset + 1) }); record = consumer.Consume(TimeSpan.FromSeconds(2)); Assert.Null(record); - consumer.Assign(new List() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset+2) }); + consumer.Assign(new List() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset + 2) }); consumer.Consume(TimeSpan.FromSeconds(2)); Assert.Null(record); } @@ -69,13 +69,13 @@ record = consumer.Consume(TimeSpan.FromSeconds(2)); using (var consumer = new TestConsumerBuilder(consumerConfig).Build()) { ConsumeResult record; - consumer.Assign(new List() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset+1) }); + consumer.Assign(new List() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset + 1) }); record = consumer.Consume(TimeSpan.FromSeconds(2)); Assert.Null(record); // Note: dr.Offset+2 is an invalid (c.f. dr.Offset+1 which is valid), so auto.offset.reset will come // into play here to determine which offset to start from (earliest). Due to the produce call above, // there is guaranteed to be a message on the topic, so consumer.Consume will return true. 
- consumer.Assign(new List() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset+2) }); + consumer.Assign(new List() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset + 2) }); record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record?.Message); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Builder_CustomDefaults.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Builder_CustomDefaults.cs index 7ba394eff..32e66a948 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Builder_CustomDefaults.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Builder_CustomDefaults.cs @@ -60,7 +60,7 @@ public override IProducer Build() this.ValueSerializer = (ISerializer)(new Utf32Serializer()); } } - + return base.Build(); } } @@ -87,12 +87,12 @@ public override IConsumer Build() this.KeyDeserializer = (IDeserializer)new Utf32Deserializer(); } } - + if (typeof(V) == typeof(string)) { if (ValueDeserializer == null) { - this.ValueDeserializer = (IDeserializer) new Utf32Deserializer(); + this.ValueDeserializer = (IDeserializer)new Utf32Deserializer(); } } @@ -112,13 +112,13 @@ public void ProducerBuilder(string bootstrapServers) { dr = p.ProduceAsync(singlePartitionTopic, new Message { Key = "abc", Value = "123" }).Result; } - + var consumerConfig = new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = Guid.NewGuid().ToString() }; - + using (var c = new MyConsumerBuilder(consumerConfig).Build()) { c.Assign(dr.TopicPartitionOffset); @@ -132,8 +132,8 @@ public void ProducerBuilder(string bootstrapServers) c.Assign(dr.TopicPartitionOffset); var cr = c.Consume(TimeSpan.FromSeconds(10)); // check that each character is serialized into 4 bytes. - Assert.Equal(3*4, cr.Message.Key.Length); - Assert.Equal(3*4, cr.Message.Value.Length); + Assert.Equal(3 * 4, cr.Message.Key.Length); + Assert.Equal(3 * 4, cr.Message.Value.Length); } Assert.Equal(0, Library.HandleCount); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/CancellationDelayMax.cs b/test/Confluent.Kafka.IntegrationTests/Tests/CancellationDelayMax.cs index 1df26a347..ad8d50666 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/CancellationDelayMax.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/CancellationDelayMax.cs @@ -65,7 +65,7 @@ public void CancellationDelayMax(string bootstrapServers) consumer.Subscribe(topic.Name); // for the consumer, check that the cancellation token is honored. - for (int i=0; i<20; ++i) + for (int i = 0; i < 20; ++i) { var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(2)); var sw = Stopwatch.StartNew(); @@ -90,7 +90,7 @@ public void CancellationDelayMax(string bootstrapServers) // for the producer, make do with just a simple check that this does not throw or hang. var dr = producer.ProduceAsync(topic.Name, new Message { Key = new byte[] { 42 }, Value = new byte[] { 255 } }).Result; - + // for the admin client, make do with just simple check that this does not throw or hang. 
var cr = new Confluent.Kafka.Admin.ConfigResource { Type = ResourceType.Topic, Name = topic.Name }; var configs = adminClient.DescribeConfigsAsync(new ConfigResource[] { cr }).Result; diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/ClientNameVersion.cs b/test/Confluent.Kafka.IntegrationTests/Tests/ClientNameVersion.cs index e751cd4d1..6bf4484d7 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/ClientNameVersion.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/ClientNameVersion.cs @@ -61,4 +61,4 @@ public void ClientNameVersion(string bootstrapServers) LogToFile("end ClientNameVersion"); } } -} \ No newline at end of file +} diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/ClosedHandle.cs b/test/Confluent.Kafka.IntegrationTests/Tests/ClosedHandle.cs index 9bd44a727..ecb3c16b6 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/ClosedHandle.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/ClosedHandle.cs @@ -63,7 +63,7 @@ public void Consumer_ClosedHandle(string bootstrapServers) consumer.Consume(TimeSpan.FromSeconds(10)); consumer.Dispose(); Assert.Throws(() => consumer.Consume(TimeSpan.FromSeconds(10))); - + Assert.Equal(0, Library.HandleCount); LogToFile("end Consumer_ClosedHandle"); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Assign.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Assign.cs index d7ebf178b..0ce69291c 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Assign.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Assign.cs @@ -36,7 +36,8 @@ public void Consumer_Assign(string bootstrapServers) int N = 5; - Action> test = consumer => { + Action> test = consumer => + { using (var topic = new TemporaryTopic(bootstrapServers, 1)) { Util.ProduceNullStringMessages(bootstrapServers, topic.Name, 1, N); @@ -53,7 +54,7 @@ public void Consumer_Assign(string bootstrapServers) var cr3 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.Equal(N, cr3.Offset); var p = consumer.Position(new TopicPartition(topic.Name, 0)); - Assert.Equal(N+1, p.Value); + Assert.Equal(N + 1, p.Value); consumer.Assign(new TopicPartitionOffset(topic.Name, 0, Offset.Beginning)); var cr4 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.Equal(0, cr4.Offset); @@ -79,7 +80,7 @@ public void Consumer_Assign(string bootstrapServers) { test(consumer); } - + Assert.Equal(0, Library.HandleCount); LogToFile("end Consumer_Assign"); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_AutoCommit.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_AutoCommit.cs index 2bd03fa4e..efe527449 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_AutoCommit.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_AutoCommit.cs @@ -74,7 +74,7 @@ public void Consumer_AutoCommit(string bootstrapServers) Thread.Sleep(TimeSpan.FromSeconds(3)); - var committed = consumer.Committed(new [] { new TopicPartition(singlePartitionTopic, 0) }, TimeSpan.FromSeconds(10)); + var committed = consumer.Committed(new[] { new TopicPartition(singlePartitionTopic, 0) }, TimeSpan.FromSeconds(10)); // if this was committing, would expect the committed offset to be first committed offset + N // (don't need to subtract 1 since the next message to be consumed is the value that is committed). 
diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Commit_Committed_Position.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Commit_Committed_Position.cs index 400329cfa..a904f4532 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Commit_Committed_Position.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Commit_Committed_Position.cs @@ -79,7 +79,7 @@ public void Consumer_Commit_Committed_Position(string bootstrapServers) } else { - throw; + throw; } } } @@ -89,7 +89,7 @@ public void Consumer_Commit_Committed_Position(string bootstrapServers) var co = consumer.Committed(new List { new TopicPartition(singlePartitionTopic, 0) }, TimeSpan.FromSeconds(10)); Assert.Equal(firstMsgOffset + 1, co[0].Offset); Assert.Equal(firstMsgOffset + 1, offset); - + // Test #2 var record2 = consumer.Consume(TimeSpan.FromMilliseconds(6000)); os = consumer.Commit(); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_CooperativeRebalance_1.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_CooperativeRebalance_1.cs index b20dbad81..1adf466ca 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_CooperativeRebalance_1.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_CooperativeRebalance_1.cs @@ -55,14 +55,17 @@ public void Consumer_CooperativeRebalance_1(string bootstrapServers) using (var topic1 = new TemporaryTopic(bootstrapServers, 1)) using (var topic2 = new TemporaryTopic(bootstrapServers, 1)) using (var consumer = new TestConsumerBuilder(consumerConfig) - .SetPartitionsAssignedHandler((c, p) => { + .SetPartitionsAssignedHandler((c, p) => + { assignCount += 1; Assert.Single(p); }) - .SetPartitionsRevokedHandler((c, p) => { + .SetPartitionsRevokedHandler((c, p) => + { revokeCount += 1; }) - .SetPartitionsLostHandler((c, p) => { + .SetPartitionsLostHandler((c, p) => + { lostCount += 1; }) .Build()) diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_DisableHeaders.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_DisableHeaders.cs index 1ce0061bb..c5518067a 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_DisableHeaders.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_DisableHeaders.cs @@ -59,7 +59,7 @@ public void Consumer_DisableHeaders(string bootstrapServers) new TestConsumerBuilder(consumerConfig) .SetErrorHandler((_, e) => Assert.True(false, e.Reason)) .Build()) - { + { consumer.Assign(new TopicPartitionOffset[] { new TopicPartitionOffset(singlePartitionTopic, 0, dr.Offset) }); var record = consumer.Consume(TimeSpan.FromSeconds(10)); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_DisableTimestamps.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_DisableTimestamps.cs index 2f8894b14..1e865ce03 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_DisableTimestamps.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_DisableTimestamps.cs @@ -59,7 +59,7 @@ public void Consumer_DisableTimestamps(string bootstrapServers) new TestConsumerBuilder(consumerConfig) .SetErrorHandler((_, e) => Assert.True(false, e.Reason)) .Build()) - { + { consumer.Assign(new TopicPartitionOffset[] { new TopicPartitionOffset(singlePartitionTopic, 0, dr.Offset) }); var record = consumer.Consume(TimeSpan.FromSeconds(10)); @@ -68,7 +68,7 @@ public void Consumer_DisableTimestamps(string bootstrapServers) Assert.Equal(TimestampType.NotAvailable, record.Message.Timestamp.Type); Assert.Equal(0, record.Message.Timestamp.UnixTimestampMs); } - + 
Assert.Equal(0, Library.HandleCount); LogToFile("end Consumer_DisableTimestamps"); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Exiting.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Exiting.cs index 65abda462..602ee9f62 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Exiting.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Exiting.cs @@ -43,7 +43,7 @@ public void Consumer_Exiting(string bootstrapServers) SessionTimeoutMs = 6000 }; - for (int i=0; i<4; ++i) + for (int i = 0; i < 4; ++i) { consumerConfig.Set("group.id", Guid.NewGuid().ToString()); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_IncrementalAssign.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_IncrementalAssign.cs index fdc44b9bf..c7c96af42 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_IncrementalAssign.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_IncrementalAssign.cs @@ -48,7 +48,7 @@ public void Consumer_IncrementalAssign(string bootstrapServers) { Util.ProduceNullStringMessages(bootstrapServers, topic1.Name, 1, 1); Util.ProduceNullStringMessages(bootstrapServers, topic2.Name, 1, 1); - + consumer.IncrementalAssign(new List { new TopicPartitionOffset(topic1.Name, 0, Offset.Beginning) }); var cr1 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(cr1); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_MissingCommits.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_MissingCommits.cs index fc9d8ca7e..b59635097 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_MissingCommits.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_MissingCommits.cs @@ -46,16 +46,16 @@ public void Consumer_MissingCommits(string bootstrapServers) using (var producer = new TestProducerBuilder(new ProducerConfig { BootstrapServers = bootstrapServers }).Build()) using (var consumer = new TestConsumerBuilder(new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = groupId }).Build()) { - for (int i=0; i { Value = "test" }); } } producer.Flush(); - for (int i=0; i(); try { - for (int i=0; i(new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = groupId, AutoOffsetReset = AutoOffsetReset.Earliest }).Build(); consumers.Add(consumer); @@ -82,7 +82,7 @@ public void Consumer_MissingCommits(string bootstrapServers) complete = true; } - for (int i=0; i(consumerConfig) .SetOffsetsCommittedHandler((_, o) => diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_OffsetsForTimes.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_OffsetsForTimes.cs index 233833f42..adbb58a6e 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_OffsetsForTimes.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_OffsetsForTimes.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -58,7 +58,7 @@ public void Consumer_OffsetsForTimes(string bootstrapServers) // If empty request, expect empty result. var result = consumer.OffsetsForTimes(new TopicPartitionTimestamp[0], timeout).ToList(); Assert.Empty(result); - + // Getting the offset for the first produced message timestamp. 
result = consumer.OffsetsForTimes( new[] { new TopicPartitionTimestamp(firstMessage.TopicPartition, firstMessage.Timestamp) }, @@ -130,10 +130,10 @@ private static DeliveryResult[] ProduceMessages(string bootstrap var message = producer.ProduceAsync( new TopicPartition(topic, partition), new Message - { + { Key = Serializers.Utf8.Serialize($"test key {index}", SerializationContext.Empty), Value = Serializers.Utf8.Serialize($"test val {index}", SerializationContext.Empty), - Timestamp = new Timestamp(baseTime + index*1000, TimestampType.CreateTime), + Timestamp = new Timestamp(baseTime + index * 1000, TimestampType.CreateTime), Headers = null } ).Result; diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Poll_MessageError.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Poll_MessageError.cs index 4192724a0..7ee2ce2b8 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Poll_MessageError.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Poll_MessageError.cs @@ -41,7 +41,7 @@ public void Consumer_Poll_MessageError(string bootstrapServers) "for topics not in local cache"); return; } - + LogToFile("start Consumer_Poll_MessageError"); var consumerConfig = new ConsumerConfig diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Seek.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Seek.cs index 143d1cc6b..874519088 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Seek.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Seek.cs @@ -61,7 +61,7 @@ public void Consumer_Seek(string bootstrapServers) Assert.NotNull(record.Message); // check leader epoch of first record Assert.Equal(0, record.LeaderEpoch); - + record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record.Message); record = consumer.Consume(TimeSpan.FromSeconds(10)); @@ -75,18 +75,18 @@ record = consumer.Consume(TimeSpan.FromSeconds(10)); record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record.Message); Assert.Equal(checkValue, record.Message.Value); - + consumer.Seek(firstRecord.TopicPartitionOffset); - + // position shouldn't be equal to the seek position. var tpo = consumer.PositionTopicPartitionOffset(record.TopicPartition); Assert.NotEqual(firstRecord.Offset, tpo.Offset); - + record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record.Message); Assert.Equal(checkValue, record.Message.Value); Assert.Equal(0, record.LeaderEpoch); - + // position should be equal to last consumed message position + 1. tpo = consumer.PositionTopicPartitionOffset(record.TopicPartition); Assert.Equal(record.Offset + 1, tpo.Offset); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_StoreOffset.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_StoreOffset.cs index 7789c9bbb..49aff8ff7 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_StoreOffset.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_StoreOffset.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_StoreOffset_ErrState.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_StoreOffset_ErrState.cs index 3329d1071..0870062a0 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_StoreOffset_ErrState.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_StoreOffset_ErrState.cs @@ -63,9 +63,9 @@ public void Consumer_StoreOffset_ErrState(string bootstrapServers) // wait until each consumer is assigned to one partition. consumer2.Consume(TimeSpan.FromSeconds(10)); consumer1.Consume(TimeSpan.FromSeconds(10)); - + cr = consumer2.Consume(TimeSpan.FromSeconds(10)); - Assert.Equal(1, consumer1.Assignment.Count); + Assert.Single(consumer1.Assignment); // StoreOffset should throw when attempting to assign to a // partition no longer assigned. diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Subscription.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Subscription.cs index 9701b520a..0029966b3 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Subscription.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Subscription.cs @@ -34,7 +34,7 @@ public partial class Tests public void Consumer_Subscription(string bootstrapServers) { LogToFile("start Consumer_Subscription"); - + int N = 2; var firstProduced = Util.ProduceNullStringMessages(bootstrapServers, singlePartitionTopic, 1, N); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Subscription_DisjointTopics.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Subscription_DisjointTopics.cs index 09eeb7a93..7661f2c6b 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Subscription_DisjointTopics.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Consumer_Subscription_DisjointTopics.cs @@ -162,7 +162,7 @@ private void DisjointTopicsSubscribeTest(String bootstrapServers, PartitionAssig consumer5.Consume(TimeSpan.FromSeconds(10)); consumer6.Consume(TimeSpan.FromSeconds(10)); - Assert.Equal(0, consumer1.Assignment.Count); + Assert.Empty(consumer1.Assignment); // Allow rebalance to complete consumer1.Subscribe(topic1.Name); consumer1.Consume(TimeSpan.FromSeconds(10)); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/GarbageCollect.cs b/test/Confluent.Kafka.IntegrationTests/Tests/GarbageCollect.cs index ec38e10ad..201c29eab 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/GarbageCollect.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/GarbageCollect.cs @@ -58,7 +58,7 @@ public void GarbageCollect(string bootstrapServers) GC.Collect(); // if an attempt is made to free an unmanaged resource a second time // in an object finalizer, the call to .Collect() will likely segfault. - + LogToFile("end GarbageCollect"); } } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Headers.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Headers.cs index 75d675a22..8a433a98d 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Headers.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Headers.cs @@ -54,7 +54,7 @@ public void MessageHeaderProduceConsume(string bootstrapServers) { // single header value. 
var headers = new Headers(); - headers.Add("test-header", new byte[] { 142 } ); + headers.Add("test-header", new byte[] { 142 }); dr_single = producer.ProduceAsync( singlePartitionTopic, new Message { Value = "the value", Headers = headers }).Result; @@ -77,8 +77,8 @@ public void MessageHeaderProduceConsume(string bootstrapServers) // multiple header values (also Headers no Dictionary, since order is tested). var headers2 = new Headers(); - headers2.Add("test-header-a", new byte[] { 111 } ); - headers2.Add("test-header-b", new byte[] { 112 } ); + headers2.Add("test-header-a", new byte[] { 111 }); + headers2.Add("test-header-b", new byte[] { 112 }); dr_multiple = producer.ProduceAsync( singlePartitionTopic, new Message { Value = "the value", Headers = headers2 }).Result; @@ -90,11 +90,11 @@ public void MessageHeaderProduceConsume(string bootstrapServers) // duplicate header values (also List not Dictionary) var headers3 = new Headers(); - headers3.Add(new Header("test-header-a", new byte[] { 111 } )); - headers3.Add(new Header("test-header-b", new byte[] { 112 } )); - headers3.Add(new Header("test-header-a", new byte[] { 113 } )); - headers3.Add(new Header("test-header-b", new byte[] { 114 } )); - headers3.Add(new Header("test-header-c", new byte[] { 115 } )); + headers3.Add(new Header("test-header-a", new byte[] { 111 })); + headers3.Add(new Header("test-header-b", new byte[] { 112 })); + headers3.Add(new Header("test-header-a", new byte[] { 113 })); + headers3.Add(new Header("test-header-b", new byte[] { 114 })); + headers3.Add(new Header("test-header-c", new byte[] { 115 })); dr_duplicate = producer.ProduceAsync(singlePartitionTopic, new Message { Value = "the value", Headers = headers3 }).Result; Assert.Equal(5, dr_duplicate.Message.Headers.Count); Assert.Equal("test-header-a", dr_duplicate.Message.Headers[0].Key); @@ -120,8 +120,8 @@ public void MessageHeaderProduceConsume(string bootstrapServers) producer.Produce(singlePartitionTopic, new Message { Value = "the value" }, dh); producer.Produce( - new TopicPartition(singlePartitionTopic, 0), - new Message { Value = "the value", Headers = headers2}, + new TopicPartition(singlePartitionTopic, 0), + new Message { Value = "the value", Headers = headers2 }, dh); producer.Flush(TimeSpan.FromSeconds(10)); @@ -167,27 +167,27 @@ public void MessageHeaderProduceConsume(string bootstrapServers) using (var consumer = new TestConsumerBuilder(consumerConfig).Build()) { - consumer.Assign(new List() {dr_single.TopicPartitionOffset}); + consumer.Assign(new List() { dr_single.TopicPartitionOffset }); var record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record.Message); Assert.Single(record.Message.Headers); Assert.Equal("test-header", record.Message.Headers[0].Key); Assert.Equal(new byte[] { 142 }, record.Message.Headers[0].GetValueBytes()); - consumer.Assign(new List() {dr_empty.TopicPartitionOffset}); + consumer.Assign(new List() { dr_empty.TopicPartitionOffset }); var record2 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record2.Message); // following Java, alway instantiate a new Headers instance, even in the empty case. 
Assert.NotNull(record2.Message.Headers); Assert.Empty(record2.Message.Headers); - consumer.Assign(new List() {dr_null.TopicPartitionOffset}); + consumer.Assign(new List() { dr_null.TopicPartitionOffset }); var record3 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record3.Message); Assert.NotNull(record3.Message.Headers); Assert.Empty(record3.Message.Headers); - consumer.Assign(new List() {dr_multiple.TopicPartitionOffset}); + consumer.Assign(new List() { dr_multiple.TopicPartitionOffset }); var record4 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record4.Message); Assert.Equal(2, record4.Message.Headers.Count); @@ -196,7 +196,7 @@ public void MessageHeaderProduceConsume(string bootstrapServers) Assert.Equal(new byte[] { 111 }, record4.Message.Headers[0].GetValueBytes()); Assert.Equal(new byte[] { 112 }, record4.Message.Headers[1].GetValueBytes()); - consumer.Assign(new List() {dr_duplicate.TopicPartitionOffset}); + consumer.Assign(new List() { dr_duplicate.TopicPartitionOffset }); var record5 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record5.Message); Assert.Equal(5, record5.Message.Headers.Count); @@ -217,65 +217,65 @@ public void MessageHeaderProduceConsume(string bootstrapServers) // Test headers work with all produce method variants. // async, serializing - consumer.Assign(new List() {dr_ol1.TopicPartitionOffset}); + consumer.Assign(new List() { dr_ol1.TopicPartitionOffset }); var record6 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record6.Message); Assert.Empty(record6.Message.Headers); - consumer.Assign(new List() {dr_ol3.TopicPartitionOffset}); + consumer.Assign(new List() { dr_ol3.TopicPartitionOffset }); var record8 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record8.Message); Assert.Single(record8.Message.Headers); // delivery-handler, serializing. 
- consumer.Assign(new List() {drs[0].TopicPartitionOffset}); + consumer.Assign(new List() { drs[0].TopicPartitionOffset }); var record9 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record9.Message); Assert.Empty(record9.Message.Headers); - consumer.Assign(new List() {drs[1].TopicPartitionOffset}); + consumer.Assign(new List() { drs[1].TopicPartitionOffset }); var record11 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record11.Message); Assert.Equal(2, record11.Message.Headers.Count); // async, non-serializing - consumer.Assign(new List() {dr_ol4.TopicPartitionOffset}); + consumer.Assign(new List() { dr_ol4.TopicPartitionOffset }); var record12 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record12.Message); Assert.Empty(record12.Message.Headers); - consumer.Assign(new List() {dr_ol5.TopicPartitionOffset}); + consumer.Assign(new List() { dr_ol5.TopicPartitionOffset }); var record13 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record13.Message); Assert.Empty(record13.Message.Headers); - consumer.Assign(new List() {dr_ol6.TopicPartitionOffset}); + consumer.Assign(new List() { dr_ol6.TopicPartitionOffset }); var record14 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record14.Message); Assert.Single(record14.Message.Headers); - consumer.Assign(new List() {dr_ol7.TopicPartitionOffset}); + consumer.Assign(new List() { dr_ol7.TopicPartitionOffset }); var record15 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record15.Message); Assert.Single(record15.Message.Headers); // delivery handler, non-serializing - consumer.Assign(new List() {drs_2[0].TopicPartitionOffset}); + consumer.Assign(new List() { drs_2[0].TopicPartitionOffset }); var record16 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record16.Message); Assert.Single(record16.Message.Headers); - consumer.Assign(new List() {drs_2[1].TopicPartitionOffset}); + consumer.Assign(new List() { drs_2[1].TopicPartitionOffset }); var record17 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record17.Message); Assert.Empty(record17.Message.Headers); - consumer.Assign(new List() {drs_2[2].TopicPartitionOffset}); + consumer.Assign(new List() { drs_2[2].TopicPartitionOffset }); var record18 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record18.Message); Assert.Single(record18.Message.Headers); - consumer.Assign(new List() {drs_2[3].TopicPartitionOffset}); + consumer.Assign(new List() { drs_2[3].TopicPartitionOffset }); var record19 = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record19.Message); Assert.Single(record19.Message.Headers); @@ -288,7 +288,7 @@ public void MessageHeaderProduceConsume(string bootstrapServers) var threw = false; try { - headers.Add(null, new byte[] { 142 } ); + headers.Add(null, new byte[] { 142 }); } catch { diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Headers_SerializationContext.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Headers_SerializationContext.cs index cfcccc5d6..59e6a63ed 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Headers_SerializationContext.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Headers_SerializationContext.cs @@ -88,7 +88,7 @@ public void HeadersSerializationContext(string bootstrapServers) Assert.Equal(new byte[] { 100, 42 }, testHeader); } - + // Test accumulation of headers using (var topic = new TemporaryTopic(bootstrapServers, 1)) using (var producer = new TestProducerBuilder(producerConfig) @@ -115,4 +115,4 @@ public void 
HeadersSerializationContext(string bootstrapServers) LogToFile("end Headers_SerializationContext"); } } -} \ No newline at end of file +} diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Metadata.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Metadata.cs index ef5e94f8a..938990de0 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Metadata.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Metadata.cs @@ -49,26 +49,26 @@ public void Metadata(string bootstrapServers) Assert.Equal(deserialized.Value("OriginatingBrokerName"), metadata.OriginatingBrokerName); var topics = new List(deserialized["Topics"].Children()); Assert.Equal(metadata.Topics.Count, topics.Count); - for (int i=0; i("Error"), metadata.Topics[i].Error.Code.ToString()); Assert.Equal(topics[i].Value("Topic"), metadata.Topics[i].Topic); var partitions = new List(topics[i]["Partitions"].Children()); Assert.Equal(partitions.Count, metadata.Topics[i].Partitions.Count); - for (int j=0; j("Error"), metadata.Topics[i].Partitions[j].Error.Code.ToString()); Assert.Equal(partitions[j].Value("Leader"), metadata.Topics[i].Partitions[j].Leader); Assert.Equal(partitions[j].Value("PartitionId"), metadata.Topics[i].Partitions[j].PartitionId); var replicas = new List(partitions[j]["Replicas"].Children()); Assert.Equal(replicas.Count, metadata.Topics[i].Partitions[j].Replicas.Length); - for (int k=0; k(), metadata.Topics[i].Partitions[j].Replicas[k]); } var inSyncReplicas = new List(partitions[j]["InSyncReplicas"].Children()); Assert.Equal(inSyncReplicas.Count, metadata.Topics[i].Partitions[j].InSyncReplicas.Length); - for (int k=0; k(), metadata.Topics[i].Partitions[j].InSyncReplicas[k]); } @@ -77,7 +77,7 @@ public void Metadata(string bootstrapServers) var brokers = new List(deserialized["Brokers"].Children()); Assert.Equal(metadata.Brokers.Count, brokers.Count); - for (int i=0; i("BrokerId")); Assert.Equal(metadata.Brokers[i].Host, brokers[i].Value("Host")); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/NullVsEmpty.cs b/test/Confluent.Kafka.IntegrationTests/Tests/NullVsEmpty.cs index a9ff3c53f..40d04cbd4 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/NullVsEmpty.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/NullVsEmpty.cs @@ -48,9 +48,9 @@ public void NullVsEmpty(string bootstrapServers) { // Assume that all these produce calls succeed. 
dr = producer.ProduceAsync(new TopicPartition(singlePartitionTopic, 0), new Message { Key = null, Value = null }).Result; - producer.ProduceAsync(new TopicPartition(singlePartitionTopic, 0), new Message { Key = null, Value = new byte[0] {} }).Wait(); - producer.ProduceAsync(new TopicPartition(singlePartitionTopic, 0), new Message { Key = new byte[0] {}, Value = null }).Wait(); - producer.ProduceAsync(new TopicPartition(singlePartitionTopic, 0), new Message { Key = new byte[0] {}, Value = new byte[0] {} }).Wait(); + producer.ProduceAsync(new TopicPartition(singlePartitionTopic, 0), new Message { Key = null, Value = new byte[0] { } }).Wait(); + producer.ProduceAsync(new TopicPartition(singlePartitionTopic, 0), new Message { Key = new byte[0] { }, Value = null }).Wait(); + producer.ProduceAsync(new TopicPartition(singlePartitionTopic, 0), new Message { Key = new byte[0] { }, Value = new byte[0] { } }).Wait(); producer.Flush(TimeSpan.FromSeconds(10)); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/OauthBearerToken_Delegate.cs b/test/Confluent.Kafka.IntegrationTests/Tests/OauthBearerToken_Delegate.cs index f53eea052..6164ca994 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/OauthBearerToken_Delegate.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/OauthBearerToken_Delegate.cs @@ -62,4 +62,4 @@ public void OAuthBearerToken_Delegate(string bootstrapServers) LogToFileEndTest(); } } -} \ No newline at end of file +} diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/OauthBearerToken_PublishConsume.cs b/test/Confluent.Kafka.IntegrationTests/Tests/OauthBearerToken_PublishConsume.cs index f45600c02..817d4a7f6 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/OauthBearerToken_PublishConsume.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/OauthBearerToken_PublishConsume.cs @@ -211,4 +211,4 @@ void AssertError(Error error) } } } -} \ No newline at end of file +} diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/OnPartitionsAssignedNotSet.cs b/test/Confluent.Kafka.IntegrationTests/Tests/OnPartitionsAssignedNotSet.cs index c5716d383..31de76ddc 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/OnPartitionsAssignedNotSet.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/OnPartitionsAssignedNotSet.cs @@ -62,7 +62,7 @@ public void OnPartitionsAssignedNotSet(string bootstrapServers) consumer.Close(); } - + Assert.Equal(0, Library.HandleCount); LogToFile("end OnPartitionsAssignedNotSet"); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_CustomPartitioner.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_CustomPartitioner.cs index 41c9514a9..41bf1b752 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_CustomPartitioner.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_CustomPartitioner.cs @@ -41,7 +41,7 @@ public void Producer_CustomPartitioner(string bootstrapServers) BootstrapServers = bootstrapServers, }; - for (int j=0; j<3; ++j) + for (int j = 0; j < 3; ++j) { using (var topic = new TemporaryTopic(bootstrapServers, PARTITION_COUNT)) { @@ -104,7 +104,7 @@ public void Producer_CustomPartitioner(string bootstrapServers) using (var producer = producerBuilder.Build()) { - for (int i=0; i> dh = (DeliveryReport dr) => { diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_OptimizeDeliveryReports.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_OptimizeDeliveryReports.cs index 590147ec9..f11d671cc 100644 --- 
a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_OptimizeDeliveryReports.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_OptimizeDeliveryReports.cs @@ -36,7 +36,7 @@ public async void Producer_OptimizeDeliveryReports(string bootstrapServers) byte[] TestValue = new byte[] { 5, 6, 7, 8 }; var producerConfig = new ProducerConfig - { + { BootstrapServers = bootstrapServers, DeliveryReportFields = "none" }; @@ -47,12 +47,12 @@ public async void Producer_OptimizeDeliveryReports(string bootstrapServers) using (var producer = new TestProducerBuilder(producerConfig).Build()) { var dr = await producer.ProduceAsync( - singlePartitionTopic, - new Message - { - Key = TestKey, - Value = TestValue, - Headers = new Headers() { new Header("my-header", new byte[] { 42 }) } + singlePartitionTopic, + new Message + { + Key = TestKey, + Value = TestValue, + Headers = new Headers() { new Header("my-header", new byte[] { 42 }) } } ); Assert.Equal(TimestampType.NotAvailable, dr.Timestamp.Type); @@ -68,12 +68,12 @@ public async void Producer_OptimizeDeliveryReports(string bootstrapServers) using (var producer = new TestProducerBuilder(producerConfig).Build()) { var dr = await producer.ProduceAsync( - singlePartitionTopic, + singlePartitionTopic, new Message - { - Key = TestKey, - Value = TestValue, - Headers = new Headers() { new Header("my-header", new byte[] { 42 }) } + { + Key = TestKey, + Value = TestValue, + Headers = new Headers() { new Header("my-header", new byte[] { 42 }) } } ); Assert.Equal(TimestampType.NotAvailable, dr.Timestamp.Type); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Poll.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Poll.cs index 44f1d523a..6ee9ca731 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Poll.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Poll.cs @@ -1,4 +1,4 @@ -// Copyright 2019 Confluent Inc. +// Copyright 2019 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Poll_Backoff.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Poll_Backoff.cs index 67ef25867..0e1221ad8 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Poll_Backoff.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Poll_Backoff.cs @@ -1,4 +1,4 @@ -// Copyright 2019 Confluent Inc. +// Copyright 2019 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -54,7 +54,7 @@ public void Producer_Poll_Backoff(string bootstrapServers) Stopwatch sw = new Stopwatch(); sw.Start(); var exceptionCount = 0; - for (int i=0; i<11; ++i) + for (int i = 0; i < 11; ++i) { try { diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce.cs index e21ceffa8..a34280d94 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce.cs @@ -35,7 +35,7 @@ public void Producer_Produce(string bootstrapServers) LogToFile("start Producer_Produce"); var producerConfig = new ProducerConfig - { + { BootstrapServers = bootstrapServers, EnableIdempotence = true, LingerMs = 1.5 @@ -62,7 +62,7 @@ public void Producer_Produce(string bootstrapServers) using (var producer = new TestProducerBuilder(producerConfig).Build()) { producer.Produce( - new TopicPartition(singlePartitionTopic, 0), + new TopicPartition(singlePartitionTopic, 0), new Message { Key = "test key 0", Value = "test val 0" }, dh); producer.Produce( @@ -95,11 +95,11 @@ public void Producer_Produce(string bootstrapServers) using (var producer = new TestProducerBuilder(producerConfig).Build()) { producer.Produce( - new TopicPartition(singlePartitionTopic, 0), + new TopicPartition(singlePartitionTopic, 0), new Message { Key = Encoding.UTF8.GetBytes("test key 42"), Value = Encoding.UTF8.GetBytes("test val 42") }, dh2); producer.Produce( - singlePartitionTopic, + singlePartitionTopic, new Message { Key = Encoding.UTF8.GetBytes("test key 43"), Value = Encoding.UTF8.GetBytes("test val 43") }, dh2); producer.Flush(TimeSpan.FromSeconds(10)); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Await.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Await.cs index 684fccb37..5c09f7157 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Await.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Await.cs @@ -36,7 +36,7 @@ public void Producer_ProduceAsync_Await_Serializing(string bootstrapServers) { LogToFile("start Producer_ProduceAsync_Await_Serializing"); - Func mthd = async () => + Func mthd = async () => { using (var producer = new TestProducerBuilder(new ProducerConfig { BootstrapServers = bootstrapServers }).Build()) { @@ -49,7 +49,7 @@ public void Producer_ProduceAsync_Await_Serializing(string bootstrapServers) }; mthd().Wait(); - + Assert.Equal(0, Library.HandleCount); LogToFile("end Producer_ProduceAsync_Await_Serializing"); } @@ -88,7 +88,7 @@ public async Task Producer_ProduceAsync_Await_Throws(string bootstrapServers) using (var producer = new TestProducerBuilder(new ProducerConfig { BootstrapServers = bootstrapServers }).Build()) { await Assert.ThrowsAsync>( - async () => + async () => { await producer.ProduceAsync( new TopicPartition(singlePartitionTopic, 42), @@ -96,7 +96,7 @@ await producer.ProduceAsync( throw new Exception("unexpected exception"); }); } - + // variation 2 Func mthd = async () => diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Error.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Error.cs index 8bc1df92b..02fd7fb6b 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Error.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Error.cs @@ -60,7 +60,7 @@ public void Producer_ProduceAsync_Error(string bootstrapServers) Assert.IsType>(inner); 
var dr = ((ProduceException)inner).DeliveryResult; var err = ((ProduceException)inner).Error; - + Assert.True(err.IsError); Assert.Equal(PersistenceStatus.NotPersisted, dr.Status); Assert.False(err.IsFatal); @@ -95,7 +95,7 @@ public void Producer_ProduceAsync_Error(string bootstrapServers) Assert.IsType>(inner); var dr = ((ProduceException)inner).DeliveryResult; var err = ((ProduceException)inner).Error; - + Assert.True(err.IsError); Assert.False(err.IsFatal); Assert.Equal(partitionedTopic, dr.Topic); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_HighConcurrency.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_HighConcurrency.cs index 2fed6f488..76aad70c1 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_HighConcurrency.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_HighConcurrency.cs @@ -40,7 +40,7 @@ public void Producer_ProduceAsync_HighConcurrency(string bootstrapServers) ThreadPool.GetMaxThreads(out int originalWorkerThreads, out int originalCompletionPortThreads); - ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); + ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); ThreadPool.SetMaxThreads(workerThreads, completionPortThreads); ThreadPool.GetMaxThreads(out workerThreads, out completionPortThreads); @@ -56,15 +56,15 @@ public void Producer_ProduceAsync_HighConcurrency(string bootstrapServers) { var tasks = new List(); - int N = workerThreads+2; - for (int i=0; i { Value = "test" })); } Task.WaitAll(tasks.ToArray()); - for (int i=0; i { Value = "test" })); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Null_Task.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Null_Task.cs index b5463cc37..a794f2a8e 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Null_Task.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Null_Task.cs @@ -46,12 +46,12 @@ public void Producer_ProduceAsync_Null_Task(string bootstrapServers) using (var producer = new TestProducerBuilder(producerConfig).Build()) { drs.Add(producer.ProduceAsync( - new TopicPartition(partitionedTopic, 0), new Message {})); - drs.Add(producer.ProduceAsync(partitionedTopic, new Message {})); + new TopicPartition(partitionedTopic, 0), new Message { })); + drs.Add(producer.ProduceAsync(partitionedTopic, new Message { })); Assert.Equal(0, producer.Flush(TimeSpan.FromSeconds(10))); } - for (int i=0; i<2; ++i) + for (int i = 0; i < 2; ++i) { var dr = drs[i].Result; Assert.True(dr.Partition == 0 || dr.Partition == 1); @@ -64,19 +64,19 @@ public void Producer_ProduceAsync_Null_Task(string bootstrapServers) } Assert.Equal((Partition)0, drs[0].Result.Partition); - + // byte[] case - + var drs2 = new List>>(); using (var producer = new TestProducerBuilder(producerConfig).Build()) { - drs2.Add(producer.ProduceAsync(new TopicPartition(partitionedTopic, 1), new Message {})); - drs2.Add(producer.ProduceAsync(partitionedTopic, new Message {})); + drs2.Add(producer.ProduceAsync(new TopicPartition(partitionedTopic, 1), new Message { })); + drs2.Add(producer.ProduceAsync(partitionedTopic, new Message { })); Assert.Equal(0, producer.Flush(TimeSpan.FromSeconds(10))); } - for (int i=0; i<2; ++i) + for (int i = 0; i < 2; ++i) { var dr = drs2[i].Result; Assert.True(dr.Partition == 0 || dr.Partition == 1); @@ -87,9 +87,9 @@ public void Producer_ProduceAsync_Null_Task(string 
bootstrapServers) Assert.Equal(TimestampType.CreateTime, dr.Message.Timestamp.Type); Assert.True(Math.Abs((DateTime.UtcNow - dr.Message.Timestamp.UtcDateTime).TotalMinutes) < 1.0); } - + Assert.Equal((Partition)1, drs2[0].Result.Partition); - + Assert.Equal(0, Library.HandleCount); LogToFile("end Producer_ProduceAsync_Null_Task"); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Task.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Task.cs index 98edbcb09..96fef0e5c 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Task.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_ProduceAsync_Task.cs @@ -54,7 +54,7 @@ public void Producer_ProduceAsync_Task(string bootstrapServers) Assert.Equal(0, producer.Flush(TimeSpan.FromSeconds(10))); } - for (int i=0; i<2; ++i) + for (int i = 0; i < 2; ++i) { var dr = drs[i].Result; Assert.Equal(PersistenceStatus.Persisted, dr.Status); @@ -84,14 +84,14 @@ public void Producer_ProduceAsync_Task(string bootstrapServers) Assert.Equal(0, producer.Flush(TimeSpan.FromSeconds(10))); } - for (int i=0; i<2; ++i) + for (int i = 0; i < 2; ++i) { var dr = drs2[i].Result; Assert.Equal(partitionedTopic, dr.Topic); Assert.True(dr.Offset >= 0); Assert.True(dr.Partition == 0 || dr.Partition == 1); - Assert.Equal($"test key {i+2}", Encoding.UTF8.GetString(dr.Message.Key)); - Assert.Equal($"test val {i+2}", Encoding.UTF8.GetString(dr.Message.Value)); + Assert.Equal($"test key {i + 2}", Encoding.UTF8.GetString(dr.Message.Key)); + Assert.Equal($"test val {i + 2}", Encoding.UTF8.GetString(dr.Message.Value)); Assert.Equal(TimestampType.CreateTime, dr.Message.Timestamp.Type); Assert.True(Math.Abs((DateTime.UtcNow - dr.Message.Timestamp.UtcDateTime).TotalMinutes) < 1.0); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_Error.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_Error.cs index 6b5818b59..5247a974e 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_Error.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_Error.cs @@ -39,7 +39,7 @@ public void Producer_Produce_Error(string bootstrapServers) // serializer case. int count = 0; - Action> dh = (DeliveryReport dr) => + Action> dh = (DeliveryReport dr) => { Assert.Equal(ErrorCode.Local_UnknownPartition, dr.Error.Code); Assert.False(dr.Error.IsFatal); @@ -83,7 +83,7 @@ public void Producer_Produce_Error(string bootstrapServers) using (var producer = new TestProducerBuilder(producerConfig).Build()) { - producer.Produce(new TopicPartition(singlePartitionTopic, 42), new Message { Key = new byte[] { 11 }}, dh2); + producer.Produce(new TopicPartition(singlePartitionTopic, 42), new Message { Key = new byte[] { 11 } }, dh2); producer.Flush(TimeSpan.FromSeconds(10)); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_Null.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_Null.cs index fffc515d0..218c2e4a3 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_Null.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_Null.cs @@ -39,7 +39,7 @@ public void Producer_Produce_Null(string bootstrapServers) // serializer case. 
int count = 0; - Action> dh = (DeliveryReport dr) => + Action> dh = (DeliveryReport dr) => { Assert.Equal(ErrorCode.NoError, dr.Error.Code); Assert.Equal(PersistenceStatus.Persisted, dr.Status); @@ -56,8 +56,8 @@ public void Producer_Produce_Null(string bootstrapServers) using (var producer = new TestProducerBuilder(producerConfig).Build()) { - producer.Produce(new TopicPartition(singlePartitionTopic, 0), new Message {}, dh); - producer.Produce(singlePartitionTopic, new Message {}, dh); + producer.Produce(new TopicPartition(singlePartitionTopic, 0), new Message { }, dh); + producer.Produce(singlePartitionTopic, new Message { }, dh); producer.Flush(TimeSpan.FromSeconds(10)); } @@ -82,8 +82,8 @@ public void Producer_Produce_Null(string bootstrapServers) using (var producer = new TestProducerBuilder(producerConfig).Build()) { - producer.Produce(new TopicPartition(singlePartitionTopic, 0), new Message {}, dh2); - producer.Produce(singlePartitionTopic, new Message {}, dh2); + producer.Produce(new TopicPartition(singlePartitionTopic, 0), new Message { }, dh2); + producer.Produce(singlePartitionTopic, new Message { }, dh2); producer.Flush(TimeSpan.FromSeconds(10)); } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_SyncOverAsync.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_SyncOverAsync.cs index ee342b554..c28c1676d 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_SyncOverAsync.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Producer_Produce_SyncOverAsync.cs @@ -40,7 +40,7 @@ public void Producer_Produce_SyncOverAsync(string bootstrapServers) ThreadPool.GetMaxThreads(out int originalWorkerThreads, out int originalCompletionPortThreads); - ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); + ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); ThreadPool.SetMaxThreads(workerThreads, completionPortThreads); ThreadPool.GetMaxThreads(out workerThreads, out completionPortThreads); @@ -48,7 +48,7 @@ public void Producer_Produce_SyncOverAsync(string bootstrapServers) { BootstrapServers = bootstrapServers }; - + using (var tempTopic = new TemporaryTopic(bootstrapServers, 1)) using (var producer = new TestProducerBuilder(pConfig) .SetValueSerializer(new SimpleAsyncSerializer().SyncOverAsync()) @@ -58,8 +58,8 @@ public void Producer_Produce_SyncOverAsync(string bootstrapServers) // will deadlock if N >= workerThreads. Set to max number that // should not deadlock. 
- int N = workerThreads-1; - for (int i=0; i actionCreator = (taskNumber) => { @@ -67,7 +67,7 @@ public void Producer_Produce_SyncOverAsync(string bootstrapServers) { object waitObj = new object(); - Action> handler = dr => + Action> handler = dr => { Assert.True(dr.Error.Code == ErrorCode.NoError); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/SetSaslCredentials.cs b/test/Confluent.Kafka.IntegrationTests/Tests/SetSaslCredentials.cs index 053315519..109385ee1 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/SetSaslCredentials.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/SetSaslCredentials.cs @@ -39,9 +39,11 @@ public void SetSaslCredentials(string bootstrapServers) using (var producer = new TestProducerBuilder(producerConfig).Build()) CheckSetSaslCredentials(producer); - var consumerConfig = new ConsumerConfig { + var consumerConfig = new ConsumerConfig + { BootstrapServers = bootstrapServers, - GroupId = Guid.NewGuid().ToString() }; + GroupId = Guid.NewGuid().ToString() + }; using (var consumer = new TestConsumerBuilder(consumerConfig).Build()) CheckSetSaslCredentials(consumer); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/SimpleProduceConsume.cs b/test/Confluent.Kafka.IntegrationTests/Tests/SimpleProduceConsume.cs index cbab5abd8..19390696b 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/SimpleProduceConsume.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/SimpleProduceConsume.cs @@ -64,14 +64,14 @@ public void SimpleProduceConsume(string bootstrapServers) ConsumeMessage(consumer, produceResult1, testString1); ConsumeMessage(consumer, produceResult2, testString2); } - + Assert.Equal(0, Library.HandleCount); LogToFile("end SimpleProduceConsume"); } private static void ConsumeMessage(IConsumer consumer, DeliveryResult dr, string testString) { - consumer.Assign(new List() {dr.TopicPartitionOffset}); + consumer.Assign(new List() { dr.TopicPartitionOffset }); var r = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(r?.Message); Assert.Equal(testString, r.Message.Value == null ? null : Encoding.UTF8.GetString(r.Message.Value, 0, r.Message.Value.Length)); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Tests.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Tests.cs index a6b2b1ed2..ca75d0bb0 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Tests.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Tests.cs @@ -174,7 +174,8 @@ public static IEnumerable OAuthBearerKafkaParameters() } return oAuthBearerKafkaParameters; } - public static bool semaphoreSkipFlakyTests(){ + public static bool semaphoreSkipFlakyTests() + { string onSemaphore = Environment.GetEnvironmentVariable("SEMAPHORE_SKIP_FLAKY_TESTS"); if (onSemaphore != null) { diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Timestamps.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Timestamps.cs index 0c2f7b0b0..1759654e6 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Timestamps.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Timestamps.cs @@ -53,15 +53,15 @@ public void Timestamps(string bootstrapServers) // --- ProduceAsync, serializer case. 
drs_task.Add(producer.ProduceAsync( - singlePartitionTopic, + singlePartitionTopic, new Message { Value = "testvalue" }).Result); - + // TimestampType: CreateTime drs_task.Add(producer.ProduceAsync( new TopicPartition(singlePartitionTopic, 0), - new Message - { - Value = "test-value", + new Message + { + Value = "test-value", Timestamp = new Timestamp(new DateTime(2008, 11, 12, 0, 0, 0, DateTimeKind.Utc)) }).Result); @@ -76,21 +76,21 @@ public void Timestamps(string bootstrapServers) new TopicPartition(singlePartitionTopic, 0), new Message { - Value = "test-value", - Timestamp = new Timestamp(DateTime.Now, TimestampType.LogAppendTime) + Value = "test-value", + Timestamp = new Timestamp(DateTime.Now, TimestampType.LogAppendTime) }).Result); // TimestampType: NotAvailable Assert.Throws(() => producer.ProduceAsync( new TopicPartition(singlePartitionTopic, 0), - new Message - { + new Message + { Value = "test-value", Timestamp = new Timestamp(10, TimestampType.NotAvailable) }).Result); - Action> dh + Action> dh = (DeliveryReport dr) => drs_produce.Add(dr); @@ -103,9 +103,9 @@ Action> dh // TimestampType: CreateTime producer.Produce( new TopicPartition(singlePartitionTopic, 0), - new Message - { - Value = "test-value", + new Message + { + Value = "test-value", Timestamp = new Timestamp(new DateTime(2008, 11, 12, 0, 0, 0, DateTimeKind.Utc)) }, dh); @@ -119,19 +119,19 @@ Action> dh // TimestampType: LogAppendTime Assert.Throws(() => producer.Produce( new TopicPartition(singlePartitionTopic, 0), - new Message - { - Value = "test-value", + new Message + { + Value = "test-value", Timestamp = new Timestamp(DateTime.Now, TimestampType.LogAppendTime) - }, + }, dh)); // TimestampType: NotAvailable Assert.Throws(() => producer.Produce( new TopicPartition(singlePartitionTopic, 0), - new Message - { - Value = "test-value", + new Message + { + Value = "test-value", Timestamp = new Timestamp(10, TimestampType.NotAvailable) }, dh)); @@ -210,7 +210,7 @@ Action> dh assertCloseToNow(consumer, drs_task[0].TopicPartitionOffset); - consumer.Assign(new List() {drs_task[1].TopicPartitionOffset}); + consumer.Assign(new List() { drs_task[1].TopicPartitionOffset }); var record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record.Message); Assert.Equal(TimestampType.CreateTime, record.Message.Timestamp.Type); @@ -222,7 +222,7 @@ Action> dh assertCloseToNow(consumer, drs_produce[0].TopicPartitionOffset); - consumer.Assign(new List() {drs_produce[1].TopicPartitionOffset}); + consumer.Assign(new List() { drs_produce[1].TopicPartitionOffset }); record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record.Message); Assert.Equal(TimestampType.CreateTime, record.Message.Timestamp.Type); @@ -239,7 +239,7 @@ record = consumer.Consume(TimeSpan.FromSeconds(10)); assertCloseToNow_byte(consumer, drs2_task[0].TopicPartitionOffset); - consumer.Assign(new List() {drs2_task[1].TopicPartitionOffset}); + consumer.Assign(new List() { drs2_task[1].TopicPartitionOffset }); record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record.Message); Assert.Equal(TimestampType.CreateTime, record.Message.Timestamp.Type); @@ -251,7 +251,7 @@ record = consumer.Consume(TimeSpan.FromSeconds(10)); assertCloseToNow_byte(consumer, drs2_produce[0].TopicPartitionOffset); - consumer.Assign(new List() {drs2_produce[1].TopicPartitionOffset}); + consumer.Assign(new List() { drs2_produce[1].TopicPartitionOffset }); record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record.Message); 
Assert.Equal(TimestampType.CreateTime, record.Message.Timestamp.Type); @@ -259,14 +259,14 @@ record = consumer.Consume(TimeSpan.FromSeconds(10)); assertCloseToNow_byte(consumer, drs2_produce[2].TopicPartitionOffset); } - + Assert.Equal(0, Library.HandleCount); LogToFile("end Timestamps"); } private static void assertCloseToNow(IConsumer consumer, TopicPartitionOffset tpo) { - consumer.Assign(new List() {tpo}); + consumer.Assign(new List() { tpo }); var cr = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(cr.Message); Assert.Equal(TimestampType.CreateTime, cr.Message.Timestamp.Type); @@ -275,7 +275,7 @@ private static void assertCloseToNow(IConsumer consumer, TopicPart private static void assertCloseToNow_byte(IConsumer consumer, TopicPartitionOffset tpo) { - consumer.Assign(new List() {tpo}); + consumer.Assign(new List() { tpo }); var cr = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(cr.Message); Assert.Equal(TimestampType.CreateTime, cr.Message.Timestamp.Type); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Abort.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Abort.cs index 86458fa0c..72b20da4d 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Abort.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Abort.cs @@ -41,13 +41,15 @@ public void Transactions_Abort(string bootstrapServers) { producer.InitTransactions(defaultTimeout); producer.BeginTransaction(); - producer.Produce(topic.Name, new Message { Key = "test key 0", Value = "test val 0" }, (dr) => { + producer.Produce(topic.Name, new Message { Key = "test key 0", Value = "test val 0" }, (dr) => + { Assert.Equal(0, dr.Offset); }); Thread.Sleep(4000); // ensure the abort ctrl message makes it into the log. producer.AbortTransaction(defaultTimeout); producer.BeginTransaction(); - producer.Produce(topic.Name, new Message { Key = "test key 1", Value = "test val 1" }, (dr) => { + producer.Produce(topic.Name, new Message { Key = "test key 1", Value = "test val 1" }, (dr) => + { // abort marker will be at offset 1. Assert.Equal(2, dr.Offset); }); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Commit.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Commit.cs index b3a5da5ae..c89df8bc5 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Commit.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Commit.cs @@ -58,7 +58,7 @@ public void Transactions_Commit(string bootstrapServers) var cr2 = consumer.Consume(); var cr3 = consumer.Consume(TimeSpan.FromMilliseconds(100)); // force the consumer to read over the final control message internally. Assert.Equal(wm.High, cr1.Offset); - Assert.Equal(wm.High+2, cr2.Offset); // there should be a skipped offset due to a commit marker in the log. + Assert.Equal(wm.High + 2, cr2.Offset); // there should be a skipped offset due to a commit marker in the log. Assert.Null(cr3); // control message should not be exposed to application. // Test that the committed offset accounts for the final ctrl message. 
@@ -66,7 +66,7 @@ public void Transactions_Commit(string bootstrapServers) } using (var producer = new TestProducerBuilder(new ProducerConfig { BootstrapServers = bootstrapServers, TransactionalId = Guid.NewGuid().ToString() }).Build()) - using (var consumer = new TestConsumerBuilder(new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = "unimportant", EnableAutoCommit = false, AutoOffsetReset=AutoOffsetReset.Latest }).Build()) + using (var consumer = new TestConsumerBuilder(new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = "unimportant", EnableAutoCommit = false, AutoOffsetReset = AutoOffsetReset.Latest }).Build()) { consumer.Assign(new TopicPartition(topic.Name, 0)); diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Statistics.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Statistics.cs index 3e3115ccb..368c93121 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Statistics.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_Statistics.cs @@ -53,7 +53,8 @@ public void Transactions_Statistics(string bootstrapServers) using (var topic = new TemporaryTopic(bootstrapServers, 1)) using (var producer = new TestProducerBuilder(new ProducerConfig { BootstrapServers = bootstrapServers, TransactionalId = Guid.NewGuid().ToString(), LingerMs = 0 }).Build()) using (var consumer = new TestConsumerBuilder(cConfig) - .SetStatisticsHandler((_, json) => { + .SetStatisticsHandler((_, json) => + { var stats = JObject.Parse(json); ls_offset = (int)stats["topics"][topic.Name]["partitions"]["0"]["ls_offset"]; hi_offset = (int)stats["topics"][topic.Name]["partitions"]["0"]["hi_offset"]; @@ -77,7 +78,7 @@ public void Transactions_Statistics(string bootstrapServers) producer.ProduceAsync(topic.Name, new Message { Key = "test", Value = "message2" }).Wait(); producer.ProduceAsync(topic.Name, new Message { Key = "test", Value = "message3" }).Wait(); - for (int i=0; i<10; ++i) + for (int i = 0; i < 10; ++i) { consumer.Consume(TimeSpan.FromMilliseconds(500)); if (done) { break; } diff --git a/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_WatermarkOffsets.cs b/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_WatermarkOffsets.cs index 4c15014a5..52b45110a 100644 --- a/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_WatermarkOffsets.cs +++ b/test/Confluent.Kafka.IntegrationTests/Tests/Transactions_WatermarkOffsets.cs @@ -50,7 +50,7 @@ public void Transactions_WatermarkOffsets(string bootstrapServers) producer.ProduceAsync(topic.Name, new Message { Key = "test", Value = "message3" }).Wait(); WatermarkOffsets wo2 = new WatermarkOffsets(Offset.Unset, Offset.Unset); - for (int i=0; i<10; ++i) + for (int i = 0; i < 10; ++i) { var cr = consumer.Consume(TimeSpan.FromMilliseconds(500)); wo2 = consumer.GetWatermarkOffsets(new TopicPartition(topic.Name, 0)); @@ -60,7 +60,7 @@ public void Transactions_WatermarkOffsets(string bootstrapServers) producer.CommitTransaction(TimeSpan.FromSeconds(30)); WatermarkOffsets wo3 = new WatermarkOffsets(Offset.Unset, Offset.Unset); - for (int i=0; i<10; ++i) + for (int i = 0; i < 10; ++i) { var cr2 = consumer.Consume(TimeSpan.FromSeconds(500)); wo3 = consumer.GetWatermarkOffsets(new TopicPartition(topic.Name, 0)); diff --git a/test/Confluent.Kafka.IntegrationTests/Util.cs b/test/Confluent.Kafka.IntegrationTests/Util.cs index 44d8ab7ba..4bcfcd9da 100644 --- a/test/Confluent.Kafka.IntegrationTests/Util.cs +++ b/test/Confluent.Kafka.IntegrationTests/Util.cs @@ -36,19 +36,19 
@@ public static class Util public static TopicPartitionOffset ProduceNullStringMessages(string bootstrapServers, string topic, int size, int number) { var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; - + var sb = new StringBuilder(size); - for (int i=0; i firstDeliveryReport = null; using (var producer = new TestProducerBuilder(producerConfig).Build()) { - for (int i=0; i { Value = msg }).Result; Assert.NotNull(dr); diff --git a/test/Confluent.Kafka.SyncOverAsync/Program.cs b/test/Confluent.Kafka.SyncOverAsync/Program.cs index 7898afe31..3eb11d05c 100644 --- a/test/Confluent.Kafka.SyncOverAsync/Program.cs +++ b/test/Confluent.Kafka.SyncOverAsync/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2019 Confluent Inc. +// Copyright 2019 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -58,7 +58,7 @@ class Program { static void Main(string[] args) { - ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); + ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); ThreadPool.SetMaxThreads(workerThreads, completionPortThreads); ThreadPool.GetMaxThreads(out workerThreads, out completionPortThreads); Console.WriteLine($"ThreadPool workerThreads: {workerThreads}, completionPortThreads: {completionPortThreads}"); @@ -67,10 +67,10 @@ static void Main(string[] args) { BootstrapServers = args[0] }; - + using (var producer = new ProducerBuilder(pConfig) .SetValueSerializer(new SimpleAsyncSerializer().SyncOverAsync()) // may deadlock due to thread pool exhaustion. - // .SetValueSerializer(new SimpleSyncSerializer()) // will never deadlock. + // .SetValueSerializer(new SimpleSyncSerializer()) // will never deadlock. .Build()) { var topic = Guid.NewGuid().ToString(); @@ -78,7 +78,7 @@ static void Main(string[] args) // will deadlock if N >= workerThreads. 
int N = workerThreads; - for (int i=0; i actionCreator = (taskNumber) => @@ -88,7 +88,7 @@ static void Main(string[] args) Console.WriteLine($"running task {taskNumber}"); object waitObj = new object(); - Action> handler = dr => + Action> handler = dr => { // in a deadlock scenario, the delivery handler will // never execute since execution of the Produce diff --git a/test/Confluent.Kafka.TestsCommon/TestConsumerBuilder.cs b/test/Confluent.Kafka.TestsCommon/TestConsumerBuilder.cs index af2fe80bc..a2c39fc90 100644 --- a/test/Confluent.Kafka.TestsCommon/TestConsumerBuilder.cs +++ b/test/Confluent.Kafka.TestsCommon/TestConsumerBuilder.cs @@ -16,7 +16,8 @@ private static IEnumerable> EditConfig( IEnumerable> config) { var consumerConfig = new ConsumerConfig( - new Dictionary(config)) {}; + new Dictionary(config)) + { }; var groupProtocol = TestConsumerGroupProtocol.GroupProtocol(); if (groupProtocol != null) @@ -26,7 +27,7 @@ private static IEnumerable> EditConfig( GroupProtocol.Consumer; } - + return consumerConfig; } -} \ No newline at end of file +} diff --git a/test/Confluent.Kafka.TestsCommon/TestConsumerGroupProtocol.cs b/test/Confluent.Kafka.TestsCommon/TestConsumerGroupProtocol.cs index a31c70d64..6b0d18ec5 100644 --- a/test/Confluent.Kafka.TestsCommon/TestConsumerGroupProtocol.cs +++ b/test/Confluent.Kafka.TestsCommon/TestConsumerGroupProtocol.cs @@ -17,4 +17,4 @@ public static string GroupProtocol() return Environment.GetEnvironmentVariable( "TEST_CONSUMER_GROUP_PROTOCOL"); } -} \ No newline at end of file +} diff --git a/test/Confluent.Kafka.TestsCommon/TestProducerBuilder.cs b/test/Confluent.Kafka.TestsCommon/TestProducerBuilder.cs index b5335aebc..55534152b 100644 --- a/test/Confluent.Kafka.TestsCommon/TestProducerBuilder.cs +++ b/test/Confluent.Kafka.TestsCommon/TestProducerBuilder.cs @@ -16,7 +16,7 @@ private static IEnumerable> EditConfig( { var producerConfig = new ProducerConfig( new Dictionary(config)) - {}; + { }; return producerConfig; } -} \ No newline at end of file +} diff --git a/test/Confluent.Kafka.Transactions/Program.cs b/test/Confluent.Kafka.Transactions/Program.cs index 28e9c871e..b0dcc7694 100644 --- a/test/Confluent.Kafka.Transactions/Program.cs +++ b/test/Confluent.Kafka.Transactions/Program.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Threading.Tasks; using Confluent.Kafka; @@ -40,7 +40,7 @@ static void Main(string[] args) var tasks = new List(); - for (int i=0; i p.Run())); diff --git a/test/Confluent.Kafka.Transactions/TestConsumer.cs b/test/Confluent.Kafka.Transactions/TestConsumer.cs index 0ec53a1af..841802804 100644 --- a/test/Confluent.Kafka.Transactions/TestConsumer.cs +++ b/test/Confluent.Kafka.Transactions/TestConsumer.cs @@ -7,7 +7,7 @@ namespace Confluent.Kafka.Transactions public class TestConsumer { static TimeSpan DefaultTimeout = TimeSpan.FromSeconds(30); - + string bootstrapServers; SimulationConfig conf; @@ -41,7 +41,7 @@ public void Run() if (!lasts.ContainsKey(cr.Message.Key)) { lasts.Add(cr.Message.Key, -1); } if (cr.Message.Value == lasts[cr.Message.Key] + 1) { Console.Write("."); } - else { Console.Write($"[producer {cr.Message.Key} expected seq {lasts[cr.Message.Key]+1} but got {cr.Message.Value}]"); break; } + else { Console.Write($"[producer {cr.Message.Key} expected seq {lasts[cr.Message.Key] + 1} but got {cr.Message.Value}]"); break; } Console.Out.Flush(); lasts[cr.Message.Key] = cr.Message.Value; } diff --git a/test/Confluent.Kafka.Transactions/TestProducer.cs 
b/test/Confluent.Kafka.Transactions/TestProducer.cs index e9e50a00a..d71a2c128 100644 --- a/test/Confluent.Kafka.Transactions/TestProducer.cs +++ b/test/Confluent.Kafka.Transactions/TestProducer.cs @@ -39,7 +39,7 @@ public void Run() producer.InitTransactions(DefaultTimeout); var currentState = ProducerState.InitState; - for (int i=0; i(() => adminClient.AlterUserScramCredentialsAsync(alterations, options) ); - Assert.Equal("Every alteration must be either a UserScramCredentialDeletion " + + Assert.Equal("Every alteration must be either a UserScramCredentialDeletion " + "or UserScramCredentialUpsertion", ex.Message); } } diff --git a/test/Confluent.Kafka.UnitTests/Admin/ConsumerGroupDescription.cs b/test/Confluent.Kafka.UnitTests/Admin/ConsumerGroupDescription.cs index f5529a4bf..8534635f6 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/ConsumerGroupDescription.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/ConsumerGroupDescription.cs @@ -58,9 +58,9 @@ public void StringRepresentation() } }; Assert.Equal( - @"{""GroupId"": ""test"", ""Error"": ""NoError"", ""IsSimpleConsumerGroup"": true"+ - @", ""PartitionAssignor"": ""testAssignor"", ""State"": ""PreparingRebalance"", ""Coordinator"": null" + - @", ""Members"": [{""ClientId"": ""client1"", ""GroupInstanceId"": null" + + @"{""GroupId"": ""test"", ""Error"": ""NoError"", ""IsSimpleConsumerGroup"": true" + + @", ""PartitionAssignor"": ""testAssignor"", ""State"": ""PreparingRebalance"", ""Coordinator"": null" + + @", ""Members"": [{""ClientId"": ""client1"", ""GroupInstanceId"": null" + @", ""ConsumerId"": ""consumer1"", ""Host"": ""localhost"", ""Assignment"": [{""Topic"": ""test1"", ""Partition"": 0}," + @"{""Topic"": ""test1"", ""Partition"": 1}]}], ""AuthorizedOperations"": [""Create""]}", description.ToString()); @@ -98,10 +98,10 @@ public void StringRepresentation() AuthorizedOperations = null }; Assert.Equal( - @"{""GroupId"": ""test"", ""Error"": ""NoError"", ""IsSimpleConsumerGroup"": true"+ - @", ""PartitionAssignor"": ""testAssignor"", ""State"": ""PreparingRebalance"", ""Coordinator"": " + + @"{""GroupId"": ""test"", ""Error"": ""NoError"", ""IsSimpleConsumerGroup"": true" + + @", ""PartitionAssignor"": ""testAssignor"", ""State"": ""PreparingRebalance"", ""Coordinator"": " + @"{""Id"": 1, ""Host"": ""localhost"", ""Port"": 9092, ""Rack"": null}" + - @", ""Members"": [{""ClientId"": ""client1"", ""GroupInstanceId"": ""groupInstanceId1""" + + @", ""Members"": [{""ClientId"": ""client1"", ""GroupInstanceId"": ""groupInstanceId1""" + @", ""ConsumerId"": ""consumer1"", ""Host"": ""localhost"", ""Assignment"": [" + @"]}], ""AuthorizedOperations"": null}", description.ToString()); diff --git a/test/Confluent.Kafka.UnitTests/Admin/CreateAclReport.cs b/test/Confluent.Kafka.UnitTests/Admin/CreateAclReport.cs index 5d38727bc..a55e99651 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/CreateAclReport.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/CreateAclReport.cs @@ -25,8 +25,8 @@ public class CreateAclReportTests [Fact] public void Equality() { - var res1 = new CreateAclReport {}; - var res2 = new CreateAclReport {}; + var res1 = new CreateAclReport { }; + var res2 = new CreateAclReport { }; var res3 = new CreateAclReport { Error = new Error(ErrorCode.NoError, "Success", false), diff --git a/test/Confluent.Kafka.UnitTests/Admin/CreateAclsError.cs b/test/Confluent.Kafka.UnitTests/Admin/CreateAclsError.cs index 0b3e97a58..a0b071ff9 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/CreateAclsError.cs +++ 
b/test/Confluent.Kafka.UnitTests/Admin/CreateAclsError.cs @@ -52,7 +52,8 @@ public class CreateAclsErrorTests private static IList CopyAclBindings(IList original) { - return original.Select((aclBinding) => { + return original.Select((aclBinding) => + { return aclBinding.Clone(); }).ToList().AsReadOnly(); } @@ -146,7 +147,8 @@ public async void InvalidAclBindings() "Invalid principal", "Invalid host", }; - var invalidTests = suffixes.Select((suffix) => { + var invalidTests = suffixes.Select((suffix) => + { return CopyAclBindings(testAclBindings); }).ToList(); invalidTests[0][0].Pattern.Type = ResourceType.Unknown; diff --git a/test/Confluent.Kafka.UnitTests/Admin/CreateTopicsError.cs b/test/Confluent.Kafka.UnitTests/Admin/CreateTopicsError.cs index 65c05c67a..e3ee13920 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/CreateTopicsError.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/CreateTopicsError.cs @@ -66,7 +66,7 @@ public async void LocalTimeout() // Correct input, fail with timeout var ex = await Assert.ThrowsAsync(() => adminClient.CreateTopicsAsync( - new List{topic}, + new List { topic }, options) ); Assert.Equal("Failed while waiting for controller: Local: Timed out", ex.Message); diff --git a/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsError.cs b/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsError.cs index 4b806a413..92800cbaa 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsError.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsError.cs @@ -62,7 +62,8 @@ public class DeleteAclsErrorTests private static IList CopyAclBindingFilters(IList original) { - return original.Select((aclBinding) => { + return original.Select((aclBinding) => + { return aclBinding.Clone(); }).ToList().AsReadOnly(); } @@ -148,7 +149,8 @@ public async void InvalidAclBindingFilters() "Invalid operation", "Invalid permission type", }; - var invalidTests = suffixes.Select((suffix) => { + var invalidTests = suffixes.Select((suffix) => + { return CopyAclBindingFilters(testAclBindingFilters); }).ToList(); invalidTests[0][0].PatternFilter.Type = ResourceType.Unknown; diff --git a/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsReport.cs b/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsReport.cs index b8b7bea8b..a84e31d5b 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsReport.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsReport.cs @@ -59,8 +59,8 @@ public void Equality() } }; - var rep1 = new DeleteAclsReport {}; - var rep2 = new DeleteAclsReport {}; + var rep1 = new DeleteAclsReport { }; + var rep2 = new DeleteAclsReport { }; var rep3 = new DeleteAclsReport { Error = new Error(ErrorCode.NoError, "Success", false), diff --git a/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsResult.cs b/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsResult.cs index a72d88d67..9f6632869 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsResult.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/DeleteAclsResult.cs @@ -59,8 +59,8 @@ public void Equality() } }; - var res1 = new DeleteAclsResult {}; - var res2 = new DeleteAclsResult {}; + var res1 = new DeleteAclsResult { }; + var res2 = new DeleteAclsResult { }; var res3 = new DeleteAclsResult { AclBindings = new List diff --git a/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsError.cs b/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsError.cs index 65703fc78..6bed05a79 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsError.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsError.cs @@ -119,7 +119,7 @@ public async void 
LocalTimeout() } } } - + [Fact] public async void InvalidAclBindingFilters() { @@ -132,7 +132,8 @@ public async void InvalidAclBindingFilters() "Invalid operation", "Invalid permission type", }; - var invalidTests = suffixes.Select((suffix) => { + var invalidTests = suffixes.Select((suffix) => + { return testAclBindingFilters[0].Clone(); }).ToList(); invalidTests[0].PatternFilter.Type = ResourceType.Unknown; diff --git a/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsReport.cs b/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsReport.cs index 4bf9b6427..fb46ffa14 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsReport.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsReport.cs @@ -59,8 +59,8 @@ public void Equality() } }; - var rep1 = new DescribeAclsReport {}; - var rep2 = new DescribeAclsReport {}; + var rep1 = new DescribeAclsReport { }; + var rep2 = new DescribeAclsReport { }; var rep3 = new DescribeAclsReport { Error = new Error(ErrorCode.NoError, "Success", false), diff --git a/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsResult.cs b/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsResult.cs index 7a71c543a..5e0c17d7e 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsResult.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/DescribeAclsResult.cs @@ -59,8 +59,8 @@ public void Equality() } }; - var res1 = new DescribeAclsResult {}; - var res2 = new DescribeAclsResult {}; + var res1 = new DescribeAclsResult { }; + var res2 = new DescribeAclsResult { }; var res3 = new DescribeAclsResult { AclBindings = new List diff --git a/test/Confluent.Kafka.UnitTests/Admin/DescribeTopicsError.cs b/test/Confluent.Kafka.UnitTests/Admin/DescribeTopicsError.cs index 29471681b..1b60602fb 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/DescribeTopicsError.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/DescribeTopicsError.cs @@ -45,7 +45,7 @@ await Assert.ThrowsAsync(() => } } } - + [Fact] public async void EmptyTopicCollection() { @@ -54,14 +54,14 @@ public async void EmptyTopicCollection() foreach (var option in options) { var result = await adminClient.DescribeTopicsAsync( - TopicCollection.OfTopicNames(new List {}), + TopicCollection.OfTopicNames(new List { }), option); Assert.Empty(result.TopicDescriptions); } } } - + [Fact] public async void WrongTopicNames() { @@ -87,7 +87,7 @@ await Assert.ThrowsAsync(() => [Fact] public async void WrongRequestTimeoutValue() { - var topicCollections = TopicCollection.OfTopicNames(new List {}); + var topicCollections = TopicCollection.OfTopicNames(new List { }); var wrongRequestTimeoutValue = new DescribeTopicsOptions { RequestTimeout = TimeSpan.FromSeconds(-1) @@ -104,7 +104,7 @@ await Assert.ThrowsAsync(() => public async void LocalTimeout() { using (var adminClient = new AdminClientBuilder(new AdminClientConfig - { + { BootstrapServers = "localhost:90922", SocketTimeoutMs = 10 }).Build()) @@ -113,7 +113,7 @@ public async void LocalTimeout() { var ex = await Assert.ThrowsAsync(() => adminClient.DescribeTopicsAsync( - TopicCollection.OfTopicNames(new List {"test"}), + TopicCollection.OfTopicNames(new List { "test" }), option) ); Assert.Equal("Failed while waiting for controller: Local: Timed out", ex.Message); diff --git a/test/Confluent.Kafka.UnitTests/Admin/DescribeTopicsResult.cs b/test/Confluent.Kafka.UnitTests/Admin/DescribeTopicsResult.cs index c17a45d12..e4fc11d3e 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/DescribeTopicsResult.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/DescribeTopicsResult.cs @@ -97,7 +97,7 @@ 
public void StringRepresentation() } } }; - + var expectedString = @"{""TopicDescriptions"": [{""Name"": ""test1"", ""TopicId"": ""AAAAAAAAAAIAAAAAAAAAAw"", ""Error"": ""NoError"", ""IsInternal"": false" + @", ""Partitions"": [{""Partition"": 0, ""Leader"": {""Id"": 0, ""Host"": ""host1"", ""Port"": 9092, ""Rack"": ""rack2""}" + @", ""Replicas"": [{""Id"": 0, ""Host"": ""host2"", ""Port"": 9092, ""Rack"": null}], ""ISR"": []}]" + @@ -106,7 +106,7 @@ public void StringRepresentation() @", ""Partitions"": [{""Partition"": 1, ""Leader"": null, ""Replicas"": []" + @", ""ISR"": [{""Id"": 2, ""Host"": ""host1"", ""Port"": 9093, ""Rack"": ""rack1""}]}]" + @", ""AuthorizedOperations"": null}]}"; - + Assert.Equal( expectedString, description.ToString()); diff --git a/test/Confluent.Kafka.UnitTests/Admin/DescribeUserScramCredentialsError.cs b/test/Confluent.Kafka.UnitTests/Admin/DescribeUserScramCredentialsError.cs index f7eb1e1f8..95cc843a0 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/DescribeUserScramCredentialsError.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/DescribeUserScramCredentialsError.cs @@ -66,7 +66,7 @@ public async void LocalTimeout() } } } - + [Fact] public async void EmptyUsers() { diff --git a/test/Confluent.Kafka.UnitTests/Admin/ListOffsetsError.cs b/test/Confluent.Kafka.UnitTests/Admin/ListOffsetsError.cs index 02b2f3df5..ebd0c2f01 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/ListOffsetsError.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/ListOffsetsError.cs @@ -36,14 +36,14 @@ public class ListOffsetsErrorTests public async void InvalidRequestTimeout() { using (var adminClient = new AdminClientBuilder(new AdminClientConfig - { + { BootstrapServers = "localhost:90922", SocketTimeoutMs = 10 }).Build()) { var ex = await Assert.ThrowsAsync(() => adminClient.ListOffsetsAsync( - new List {}, + new List { }, new ListOffsetsOptions { RequestTimeout = TimeSpan.FromSeconds(-1) @@ -57,7 +57,7 @@ public async void InvalidRequestTimeout() public async void InvalidPartitions() { using (var adminClient = new AdminClientBuilder(new AdminClientConfig - { + { BootstrapServers = "localhost:90922", SocketTimeoutMs = 10 }).Build()) @@ -98,7 +98,7 @@ public async void InvalidPartitions() } } } - + [Fact] public async void EmptyPartitions() { @@ -108,7 +108,7 @@ public async void EmptyPartitions() { var result = await adminClient.ListOffsetsAsync( new List - {}, + { }, option); Assert.Empty(result.ResultInfos); } @@ -119,7 +119,7 @@ public async void EmptyPartitions() public async void SamePartitionDifferentOffsets() { using (var adminClient = new AdminClientBuilder(new AdminClientConfig - { + { BootstrapServers = "localhost:90922", SocketTimeoutMs = 10 }).Build()) @@ -154,7 +154,7 @@ public async void SamePartitionDifferentOffsets() public async void TwoDifferentPartitions() { using (var adminClient = new AdminClientBuilder(new AdminClientConfig - { + { BootstrapServers = "localhost:90922", SocketTimeoutMs = 10 }).Build()) @@ -189,7 +189,7 @@ public async void TwoDifferentPartitions() public async void SinglePartition() { using (var adminClient = new AdminClientBuilder(new AdminClientConfig - { + { BootstrapServers = "localhost:90922", SocketTimeoutMs = 10 }).Build()) diff --git a/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialDeletion.cs b/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialDeletion.cs index 79ac30c1a..3d34bd09f 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialDeletion.cs +++ 
b/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialDeletion.cs @@ -26,7 +26,8 @@ public class UserScramCredentialDeletionTests public void StringRepresentation() { // Deletion - var deletion = new UserScramCredentialDeletion { + var deletion = new UserScramCredentialDeletion + { User = "test", Mechanism = ScramMechanism.ScramSha256 }; diff --git a/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialUpsertion.cs b/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialUpsertion.cs index 4f807437a..be33c1929 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialUpsertion.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialUpsertion.cs @@ -42,7 +42,7 @@ public void StringRepresentation() @"{""User"": ""test"", ""ScramCredentialInfo"": " + @"{""Mechanism"": ""ScramSha256"", ""Iterations"": 10000}}", upsertion.ToString()); - + // Empty salt upsertion = new UserScramCredentialUpsertion { diff --git a/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialsDescription.cs b/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialsDescription.cs index 3e56b669a..97a91cdc5 100644 --- a/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialsDescription.cs +++ b/test/Confluent.Kafka.UnitTests/Admin/UserScramCredentialsDescription.cs @@ -30,7 +30,7 @@ public void StringRepresentation() var description = new UserScramCredentialsDescription { User = "test", - ScramCredentialInfos = new List + ScramCredentialInfos = new List { new ScramCredentialInfo() { @@ -47,7 +47,7 @@ public void StringRepresentation() }; Assert.Equal( @"{""User"": ""test"", ""ScramCredentialInfos"": " + - @"[{""Mechanism"": ""ScramSha256"", ""Iterations"": 10000}, " + + @"[{""Mechanism"": ""ScramSha256"", ""Iterations"": 10000}, " + @"{""Mechanism"": ""ScramSha512"", ""Iterations"": 5000}], ""Error"": ""Success""}", description.ToString()); @@ -55,14 +55,14 @@ public void StringRepresentation() description = new UserScramCredentialsDescription { User = "test", - ScramCredentialInfos = new List + ScramCredentialInfos = new List { }, Error = ErrorCode.ResourceNotFound }; Assert.Equal( @"{""User"": ""test"", ""ScramCredentialInfos"": [], " + - @"""Error"": ""Broker: Request illegally referred to " + + @"""Error"": ""Broker: Request illegally referred to " + @"resource that does not exist""}", description.ToString()); } diff --git a/test/Confluent.Kafka.UnitTests/ConfigEnums.cs b/test/Confluent.Kafka.UnitTests/ConfigEnums.cs index c49118623..cde90e22c 100644 --- a/test/Confluent.Kafka.UnitTests/ConfigEnums.cs +++ b/test/Confluent.Kafka.UnitTests/ConfigEnums.cs @@ -1,4 +1,4 @@ -using Xunit; +using Xunit; namespace Confluent.Kafka.UnitTests { diff --git a/test/Confluent.Kafka.UnitTests/Consumer.cs b/test/Confluent.Kafka.UnitTests/Consumer.cs index 44424d470..8c12e9110 100644 --- a/test/Confluent.Kafka.UnitTests/Consumer.cs +++ b/test/Confluent.Kafka.UnitTests/Consumer.cs @@ -68,20 +68,20 @@ public void Constructor_ConsumerTxn() { // should not throw using (var c = new ConsumerBuilder(new ConsumerConfig - { - BootstrapServers = "localhost:666", - GroupId = Guid.NewGuid().ToString(), - IsolationLevel = IsolationLevel.ReadCommitted - }).Build()) + { + BootstrapServers = "localhost:666", + GroupId = Guid.NewGuid().ToString(), + IsolationLevel = IsolationLevel.ReadCommitted + }).Build()) { } // should not throw using (var c = new ConsumerBuilder(new ConsumerConfig - { - BootstrapServers = "localhost:666", - GroupId = Guid.NewGuid().ToString(), - IsolationLevel = IsolationLevel.ReadUncommitted - 
}).Build()) + { + BootstrapServers = "localhost:666", + GroupId = Guid.NewGuid().ToString(), + IsolationLevel = IsolationLevel.ReadUncommitted + }).Build()) { } } diff --git a/test/Confluent.Kafka.UnitTests/Headers.cs b/test/Confluent.Kafka.UnitTests/Headers.cs index ddea2edab..990348651 100644 --- a/test/Confluent.Kafka.UnitTests/Headers.cs +++ b/test/Confluent.Kafka.UnitTests/Headers.cs @@ -27,11 +27,11 @@ public class HeadersTests public void Add() { var hdrs = new Headers(); - hdrs.Add("aaa", new byte[] { 32, 42 } ); + hdrs.Add("aaa", new byte[] { 32, 42 }); Assert.Single(hdrs); Assert.Equal("aaa", hdrs[0].Key); - Assert.Equal(new byte[] {32, 42}, hdrs[0].GetValueBytes()); + Assert.Equal(new byte[] { 32, 42 }, hdrs[0].GetValueBytes()); } [Fact] @@ -92,7 +92,7 @@ public void TryGetLast_NotExist() public void NullKey() { var hdrs = new Headers(); - Assert.Throws(() => hdrs.Add(null, new byte[] {})); + Assert.Throws(() => hdrs.Add(null, new byte[] { })); } [Fact] diff --git a/test/Confluent.Kafka.UnitTests/InvalidHandle.cs b/test/Confluent.Kafka.UnitTests/InvalidHandle.cs index cdd6e06d3..9a8297920 100644 --- a/test/Confluent.Kafka.UnitTests/InvalidHandle.cs +++ b/test/Confluent.Kafka.UnitTests/InvalidHandle.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,7 +38,7 @@ public void KafkaHandleCreation() SecurityProtocol = SecurityProtocol.Ssl, SslCaLocation = "invalid" }; - + var pConfig = new ProducerConfig { SaslMechanism = SaslMechanism.Plain, diff --git a/test/Confluent.Kafka.UnitTests/MoqExample.cs b/test/Confluent.Kafka.UnitTests/MoqExample.cs index 97f65c3a7..70c542cde 100644 --- a/test/Confluent.Kafka.UnitTests/MoqExample.cs +++ b/test/Confluent.Kafka.UnitTests/MoqExample.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -47,7 +47,7 @@ public void IProducer() // Topic = topic, Partition = 0, Offset = 0, Error = new Error(ErrorCode.NoError), // Message = message // }; - + // // Note: this is a simplification of the actual Producer implementation - // // A good mock would delay invocation of the callback and invoke it on a // // different thread. diff --git a/test/Confluent.Kafka.UnitTests/Serialization/ByteArray.cs b/test/Confluent.Kafka.UnitTests/Serialization/ByteArray.cs index 3acf442b9..7ff79acaa 100644 --- a/test/Confluent.Kafka.UnitTests/Serialization/ByteArray.cs +++ b/test/Confluent.Kafka.UnitTests/Serialization/ByteArray.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2019 Confluent Inc. +// Copyright 2016-2019 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/test/Confluent.Kafka.UnitTests/Serialization/Float.cs b/test/Confluent.Kafka.UnitTests/Serialization/Float.cs index 00168a5db..53d18867a 100644 --- a/test/Confluent.Kafka.UnitTests/Serialization/Float.cs +++ b/test/Confluent.Kafka.UnitTests/Serialization/Float.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/test/Confluent.Kafka.UnitTests/Serialization/Int.cs b/test/Confluent.Kafka.UnitTests/Serialization/Int.cs index a76e85538..847719776 100644 --- a/test/Confluent.Kafka.UnitTests/Serialization/Int.cs +++ b/test/Confluent.Kafka.UnitTests/Serialization/Int.cs @@ -53,7 +53,7 @@ public void SerializationAgreesWithSystemNetHostToNetworkOrder() Assert.Equal(bytes1.Length, bytes2.Length); - for (int i=0; i(() => (TopicPartitionOffset) tpoe); + Assert.Throws(() => (TopicPartitionOffset)tpoe); } } } diff --git a/test/Confluent.Kafka.UnitTests/TopicPartitionTimestamp.cs b/test/Confluent.Kafka.UnitTests/TopicPartitionTimestamp.cs index e068e4848..ab6b9b42b 100644 --- a/test/Confluent.Kafka.UnitTests/TopicPartitionTimestamp.cs +++ b/test/Confluent.Kafka.UnitTests/TopicPartitionTimestamp.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/test/Confluent.Kafka.VerifiableClient/Program.cs b/test/Confluent.Kafka.VerifiableClient/Program.cs index 2e8e5f910..52eac0a74 100644 --- a/test/Confluent.Kafka.VerifiableClient/Program.cs +++ b/test/Confluent.Kafka.VerifiableClient/Program.cs @@ -1,4 +1,4 @@ -// Copyright 2017 Confluent Inc. +// Copyright 2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -40,8 +40,8 @@ public static string SerializeObject(object o) public class LowercaseContractResolver : DefaultContractResolver { protected override string ResolvePropertyName(string propertyName) - => propertyName.Equals("minOffset") || propertyName.Equals("maxOffset") - ? propertyName + => propertyName.Equals("minOffset") || propertyName.Equals("maxOffset") + ? propertyName : propertyName.ToLower(); } } @@ -124,7 +124,7 @@ public class VerifiableClientConfig public VerifiableClientConfig() { this.Conf = new Dictionary - { + { { "log.thread.name", true } }; } @@ -484,7 +484,7 @@ private void Commit(bool immediate) results = null; error = ex.Error; } - + SendOffsetsCommitted(new CommittedOffsets(results, error)); } diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/BasicAuth.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/BasicAuth.cs index dc3549576..107a1ba63 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/BasicAuth.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/BasicAuth.cs @@ -28,7 +28,7 @@ public static partial class Tests [Theory, MemberData(nameof(SchemaRegistryParameters))] public static void BasicAuth(Config config) { - var testSchema1 = + var testSchema1 = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; @@ -75,7 +75,7 @@ public static void BasicAuth(Config config) // 1.3. credentials specified as SASL_INHERIT. 
using (var sr = new CachedSchemaRegistryClient( new Dictionary - { + { { "schema.registry.url", config.ServerWithAuth }, { "schema.registry.basic.auth.credentials.source", "SASL_INHERIT" }, { "sasl.username", config.Username }, @@ -109,7 +109,7 @@ public static void BasicAuth(Config config) Assert.Throws(() => { var sr = new CachedSchemaRegistryClient(new Dictionary - { + { { "schema.registry.url", config.ServerWithAuth }, { "schema.registry.basic.auth.credentials.source", "SASL_INHERIT" }, { "schema.registry.basic.auth.user.info", $"{config.Username:config.Password}" } @@ -119,7 +119,7 @@ public static void BasicAuth(Config config) Assert.Throws(() => { var sr = new CachedSchemaRegistryClient(new Dictionary - { + { { "schema.registry.url", config.ServerWithAuth }, { "schema.registry.basic.auth.credentials.source", "UBUTE_SOURCE" } }); @@ -128,7 +128,7 @@ public static void BasicAuth(Config config) Assert.Throws(() => { var sr = new CachedSchemaRegistryClient(new Dictionary - { + { { "schema.registry.url", config.ServerWithAuth }, { "schema.registry.basic.auth.credentials.source", "NONE" }, { "schema.registry.basic.auth.user.info", $"{config.Username:config.Password}" } @@ -139,8 +139,8 @@ public static void BasicAuth(Config config) // SR <= 5.3.4 returns Unauthorized with empty Content (HttpRequestException) // 5.3.4 < SR <= 5.3.8 returns Unauthorized with message but without error_code (SchemaRegistryException) // SR >= 5.40 returns Unauthorized with message and error_code (SchemaRegistryException) - var schemaRegistryException = Assert.Throws(() => - { + var schemaRegistryException = Assert.Throws(() => + { var sr = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = config.ServerWithAuth }); var topicName = Guid.NewGuid().ToString(); var subject = SubjectNameStrategy.Topic.ConstructValueSubjectName(topicName, null); @@ -157,4 +157,4 @@ public static void BasicAuth(Config config) Assert.Equal("Unauthorized; error code: 401", schemaRegistryException.Message); } } -} \ No newline at end of file +} diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Failover.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Failover.cs index 6f6df986b..cce79e788 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Failover.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Failover.cs @@ -54,8 +54,8 @@ public static void Failover(Config config) { var topicName = Guid.NewGuid().ToString(); var subject = SubjectNameStrategy.Topic.ConstructKeySubjectName(topicName, null); - - Assert.Throws(() => + + Assert.Throws(() => { try { diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetAllSubjects.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetAllSubjects.cs index da98a15e0..30ee464d9 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetAllSubjects.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetAllSubjects.cs @@ -28,7 +28,7 @@ public static void GetAllSubjects(Config config) { var topicName = Guid.NewGuid().ToString(); - var testSchema1 = + var testSchema1 = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; @@ -50,7 +50,7 @@ public static void GetAllSubjects(Config config) Assert.Equal(subjectsAfter.Count, subjectsAfter2.Count); - 
Assert.True(subjectsAfter2.Contains(subject)); + Assert.Contains(subject, subjectsAfter2); } } } diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetId.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetId.cs index 7186033bb..a306ba73d 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetId.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetId.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -40,7 +40,7 @@ public static void GetId(Config config) Assert.Equal(id, id2); - Assert.Throws(() => + Assert.Throws(() => { try { diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetLatestSchema.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetLatestSchema.cs index 829879a4c..43f559848 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetLatestSchema.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetLatestSchema.cs @@ -28,7 +28,7 @@ public static void GetLatestSchema(Config config) { var topicName = Guid.NewGuid().ToString(); - var testSchema1 = + var testSchema1 = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; @@ -42,7 +42,7 @@ public static void GetLatestSchema(Config config) Assert.Equal(schema.Id, id); Assert.Equal(schema.Subject, subject); - Assert.Equal(schema.Version, 1); + Assert.Equal(1, schema.Version); Assert.Equal(schema.SchemaString, testSchema1); } } diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSchemaById.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSchemaById.cs index 5ed98a5d7..19c9cef1b 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSchemaById.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSchemaById.cs @@ -27,7 +27,7 @@ public static void GetSchemaById(Config config) { var topicName = Guid.NewGuid().ToString(); - var testSchema1 = + var testSchema1 = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; @@ -40,7 +40,7 @@ public static void GetSchemaById(Config config) var schema = sr.GetSchemaAsync(id).Result; Assert.Equal(schema.SchemaString, testSchema1); Assert.Empty(schema.References); - Assert.Equal(schema.SchemaType, SchemaType.Avro); + Assert.Equal(SchemaType.Avro, schema.SchemaType); } } } diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSchemaBySubjectAndVersion.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSchemaBySubjectAndVersion.cs index 18fe0d4a8..dfb35ec59 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSchemaBySubjectAndVersion.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSchemaBySubjectAndVersion.cs @@ -27,7 +27,7 @@ public static void GetSchemaBySubjectAndVersion(Config config) { var topicName = Guid.NewGuid().ToString(); - var testSchema1 = + var testSchema1 = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + 
"\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; @@ -41,7 +41,7 @@ public static void GetSchemaBySubjectAndVersion(Config config) var schema = sr.GetRegisteredSchemaAsync(subject, latestSchema.Version).Result; Assert.Equal(schema.SchemaString, testSchema1); - Assert.Equal(schema.SchemaType, SchemaType.Avro); + Assert.Equal(SchemaType.Avro, schema.SchemaType); Assert.Empty(schema.References); } } diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSubjectVersions.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSubjectVersions.cs index 60db173a5..48b5727c1 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSubjectVersions.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/GetSubjectVersions.cs @@ -1,4 +1,4 @@ -// Copyright 20 Confluent Inc. +// Copyright 20 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -47,7 +47,7 @@ public static void GetSubjectVersions(Config config) var versions = sr.GetSubjectVersionsAsync(subject).Result; - Assert.Equal(versions.Count, 2); + Assert.Equal(2, versions.Count); } } } diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/IsCompatible.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/IsCompatible.cs index 1d76767a0..4b9b9767a 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/IsCompatible.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/IsCompatible.cs @@ -27,7 +27,7 @@ public static void IsCompatible_Topic(Config config) { var sr = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = config.Server }); - var testSchema1 = + var testSchema1 = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Json.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Json.cs index 39e47ae0e..b3c276340 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Json.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Json.cs @@ -65,7 +65,7 @@ public static void Json(Config config) var sr = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = config.Server }); var topicName = Guid.NewGuid().ToString(); var subjectInitial = SubjectNameStrategy.Topic.ConstructValueSubjectName(topicName, null); - var subject = SubjectNameStrategy.Topic.ConstructValueSubjectName(topicName+"2", null); + var subject = SubjectNameStrategy.Topic.ConstructValueSubjectName(topicName + "2", null); var id1 = srInitial.RegisterSchemaAsync(subjectInitial, new Schema(TestJsonSchema, SchemaType.Json)).Result; var schema1 = sr.GetSchemaAsync(id1).Result; // use a different sr instance to ensure a cached value is not read. 
@@ -85,7 +85,7 @@ public static void Json(Config config) // compatibility var compat = sr.IsCompatibleAsync(subject, schema2).Result; Assert.True(compat); - var avroSchema = + var avroSchema = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/JsonWithReferences.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/JsonWithReferences.cs index 060a11d6a..4ed95159c 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/JsonWithReferences.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/JsonWithReferences.cs @@ -46,7 +46,7 @@ public static partial class Tests } }"; - private static string S2 = @" + private static string S2 = @" { ""$schema"": ""http://json-schema.org/draft-07/schema#"", ""$id"": ""http://example.com/product.schema.json"", diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Protobuf.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Protobuf.cs index eb0b4c05b..1a0fa4b63 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Protobuf.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Protobuf.cs @@ -30,7 +30,7 @@ public static void Protobuf(Config config) var testSchemaBase64 = Confluent.Kafka.Examples.Protobuf.User.Descriptor.File.SerializedData.ToBase64(); var topicName = Guid.NewGuid().ToString(); var subjectInitial = SubjectNameStrategy.Topic.ConstructValueSubjectName(topicName, null); - var subject = SubjectNameStrategy.Topic.ConstructValueSubjectName(topicName+"2", null); + var subject = SubjectNameStrategy.Topic.ConstructValueSubjectName(topicName + "2", null); // check that registering a base64 protobuf schema works. 
var id1 = srInitial.RegisterSchemaAsync(subjectInitial, new Schema(testSchemaBase64, SchemaType.Protobuf)).Result; @@ -71,7 +71,7 @@ public static void Protobuf(Config config) // compatibility var compat = sr.IsCompatibleAsync(subject, schema2).Result; Assert.True(compat); - var avroSchema = + var avroSchema = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; @@ -81,8 +81,9 @@ public static void Protobuf(Config config) Assert.False(compat3); // invalid type - Assert.ThrowsAny(() => { - sr.RegisterSchemaAsync(SubjectNameStrategy.Topic.ConstructKeySubjectName(topicName+"3", null), new Schema(avroSchema, SchemaType.Protobuf)).Wait(); + Assert.ThrowsAny(() => + { + sr.RegisterSchemaAsync(SubjectNameStrategy.Topic.ConstructKeySubjectName(topicName + "3", null), new Schema(avroSchema, SchemaType.Protobuf)).Wait(); }); } } diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/ProtobufWithReferences.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/ProtobufWithReferences.cs index 0ad4882ab..b0dce8246 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/ProtobufWithReferences.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/ProtobufWithReferences.cs @@ -39,4 +39,4 @@ public static void ProtobufWithReferences(Config config) var sc = srInitial.GetSchemaAsync(id1).Result; } } -} \ No newline at end of file +} diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterIncompatibleSchema.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterIncompatibleSchema.cs index 5ff926f6b..e96a9c6a9 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterIncompatibleSchema.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterIncompatibleSchema.cs @@ -28,7 +28,7 @@ public static void RegisterIncompatibleSchema(Config config) { var topicName = Guid.NewGuid().ToString(); - var testSchema1 = + var testSchema1 = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; @@ -47,7 +47,7 @@ public static void RegisterIncompatibleSchema(Config config) Assert.Throws(() => sr.RegisterSchemaAsync(subject, testSchema2).Result); - Assert.True(sr.GetAllSubjectsAsync().Result.Contains(subject)); + Assert.Contains(subject, sr.GetAllSubjectsAsync().Result); } } } diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterNormalizedSchema.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterNormalizedSchema.cs index 46e0a7a45..003a7a83d 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterNormalizedSchema.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterNormalizedSchema.cs @@ -28,11 +28,11 @@ public static void RegisterNormalizedSchema(Config config) { var topicName = Guid.NewGuid().ToString(); - var testSchema1 = + var testSchema1 = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":{\"type\": \"string\"}},{\"name\":\"favorite_number\",\"type\":[\"i" + 
"nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; - var normalized = + var normalized = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterSameSchemaTwice.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterSameSchemaTwice.cs index 806b5d2ae..4825d88ae 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterSameSchemaTwice.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/RegisterSameSchemaTwice.cs @@ -28,7 +28,7 @@ public static void RegisterSameSchemaTwice(Config config) { var topicName = Guid.NewGuid().ToString(); - var testSchema1 = + var testSchema1 = "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; @@ -42,8 +42,8 @@ public static void RegisterSameSchemaTwice(Config config) var id2 = sr.RegisterSchemaAsync(subject, testSchema1).Result; Assert.Equal(id1, id2); - - Assert.True(sr.GetAllSubjectsAsync().Result.Contains(subject)); + + Assert.Contains(subject, sr.GetAllSubjectsAsync().Result); } } } diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Tests.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Tests.cs index 71aeb8fa5..12a749922 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Tests.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/Tests.cs @@ -57,7 +57,8 @@ public static IEnumerable SchemaRegistryParameters() } return schemaRegistryParameters; } - public static bool semaphoreSkipFlakyTests(){ + public static bool semaphoreSkipFlakyTests() + { string onSemaphore = Environment.GetEnvironmentVariable("SEMAPHORE_SKIP_FLAKY_TESTS"); if (onSemaphore != null) { diff --git a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/UpdateCompatibility.cs b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/UpdateCompatibility.cs index de07b9d92..5bb0ca017 100644 --- a/test/Confluent.SchemaRegistry.IntegrationTests/Tests/UpdateCompatibility.cs +++ b/test/Confluent.SchemaRegistry.IntegrationTests/Tests/UpdateCompatibility.cs @@ -6,36 +6,36 @@ namespace Confluent.SchemaRegistry.IntegrationTests; public static partial class Tests { - [Theory, MemberData(nameof(SchemaRegistryParameters))] - public static async Task UpdateCompatibility(Config config) - { - var sr = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = config.Server }); + [Theory, MemberData(nameof(SchemaRegistryParameters))] + public static async Task UpdateCompatibility(Config config) + { + var sr = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = config.Server }); - // Case 1: Subject is not specified + // Case 1: Subject is not specified - var globalCompatibility = await sr.UpdateCompatibilityAsync(Compatibility.BackwardTransitive); - Assert.Equal(Compatibility.BackwardTransitive, globalCompatibility); + var globalCompatibility = await sr.UpdateCompatibilityAsync(Compatibility.BackwardTransitive); + Assert.Equal(Compatibility.BackwardTransitive, globalCompatibility); - Assert.Equal(Compatibility.BackwardTransitive, await 
sr.GetCompatibilityAsync()); + Assert.Equal(Compatibility.BackwardTransitive, await sr.GetCompatibilityAsync()); - // Case 2: Subject is specified + // Case 2: Subject is specified - var testSchema = - "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + - "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + - "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; + var testSchema = + "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" + + "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" + + "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}"; - var topicName = Guid.NewGuid().ToString(); - var subject = - SubjectNameStrategy.Topic.ConstructKeySubjectName(topicName, "Confluent.Kafka.Examples.AvroSpecific.User"); + var topicName = Guid.NewGuid().ToString(); + var subject = + SubjectNameStrategy.Topic.ConstructKeySubjectName(topicName, "Confluent.Kafka.Examples.AvroSpecific.User"); - await sr.RegisterSchemaAsync(subject, testSchema); + await sr.RegisterSchemaAsync(subject, testSchema); - var compatibility = await sr.UpdateCompatibilityAsync(Compatibility.FullTransitive, subject); - Assert.Equal(Compatibility.FullTransitive, compatibility); + var compatibility = await sr.UpdateCompatibilityAsync(Compatibility.FullTransitive, subject); + Assert.Equal(Compatibility.FullTransitive, compatibility); - Assert.Equal(Compatibility.FullTransitive, await sr.GetCompatibilityAsync(subject)); - Assert.Equal(Compatibility.BackwardTransitive, await sr.GetCompatibilityAsync()); - } -} \ No newline at end of file + Assert.Equal(Compatibility.FullTransitive, await sr.GetCompatibilityAsync(subject)); + Assert.Equal(Compatibility.BackwardTransitive, await sr.GetCompatibilityAsync()); + } +} diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/TemporaryTopic.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/TemporaryTopic.cs index 0a7bd09e6..470ccd4a8 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/TemporaryTopic.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/TemporaryTopic.cs @@ -25,7 +25,7 @@ namespace Confluent.SchemaRegistry.Serdes.IntegrationTests public class TemporaryTopic : IDisposable { private string bootstrapServers; - + public string Name { get; set; } public TemporaryTopic(string bootstrapServers, int numPartitions) diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests.cs index 04579ad1d..b07b9e980 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests.cs @@ -1,4 +1,4 @@ -// Copyright 2016-2017 Confluent Inc. +// Copyright 2016-2017 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/AutoRegisterSchemaDisabled.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/AutoRegisterSchemaDisabled.cs index f475a9e8c..8cb9580d9 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/AutoRegisterSchemaDisabled.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/AutoRegisterSchemaDisabled.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -140,7 +140,7 @@ public static void AutoRegisterSchemaDisabled(string bootstrapServers, string sc using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = schemaRegistryServers })) using (var producer = new ProducerBuilder(producerConfig) - .SetKeySerializer(new AvroSerializer(schemaRegistry, new AvroSerializerConfig { AutoRegisterSchemas = false, UseLatestVersion = true})) + .SetKeySerializer(new AvroSerializer(schemaRegistry, new AvroSerializerConfig { AutoRegisterSchemas = false, UseLatestVersion = true })) .SetValueSerializer(new AvroSerializer(schemaRegistry)) .Build()) { diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/AvroAndRegular.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/AvroAndRegular.cs index 3e2167b8d..55dc346df 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/AvroAndRegular.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/AvroAndRegular.cs @@ -32,7 +32,7 @@ public static void AvoAndRegular(string bootstrapServers, string schemaRegistryS { using (var topic1 = new TemporaryTopic(bootstrapServers, 1)) using (var topic2 = new TemporaryTopic(bootstrapServers, 1)) - { + { var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers @@ -134,7 +134,7 @@ public static void AvoAndRegular(string bootstrapServers, string schemaRegistryS .Build()) { consumer.Assign(new TopicPartitionOffset(topic2.Name, 0, 0)); - Assert.ThrowsAny(() => + Assert.ThrowsAny(() => { try { @@ -170,4 +170,4 @@ public static void AvoAndRegular(string bootstrapServers, string schemaRegistryS } } } -} \ No newline at end of file +} diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ConsumeIncompatibleTypes.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ConsumeIncompatibleTypes.cs index dcbcc5dbd..87f67c2ee 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ConsumeIncompatibleTypes.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ConsumeIncompatibleTypes.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -48,7 +48,7 @@ public static void ConsumeIncompatibleTypes(string bootstrapServers, string sche SessionTimeoutMs = 6000, AutoOffsetReset = AutoOffsetReset.Earliest }; - + var schemaRegistryConfig = new SchemaRegistryConfig { Url = schemaRegistryServers diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/PrimitiveTypes.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/PrimitiveTypes.cs index 4d53f157b..27d719fd1 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/PrimitiveTypes.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/PrimitiveTypes.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -74,7 +74,7 @@ public static void PrimitiveTypes(string bootstrapServers, string schemaRegistry .Build()) { producer - .ProduceAsync(bytesTopic, new Message { Key = new byte[] { 1, 4, 11 }, Value = new byte[] {} }) + .ProduceAsync(bytesTopic, new Message { Key = new byte[] { 1, 4, 11 }, Value = new byte[] { } }) .Wait(); Assert.Equal(0, producer.Flush(TimeSpan.FromSeconds(10))); } @@ -146,7 +146,7 @@ public static void PrimitiveTypes(string bootstrapServers, string schemaRegistry .Build()) { producer - .ProduceAsync(nullTopic, new Message()) + .ProduceAsync(nullTopic, new Message()) .Wait(); Assert.Equal(0, producer.Flush(TimeSpan.FromSeconds(10))); } diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceConsume.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceConsume.cs index ba3a7e489..e594b00e1 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceConsume.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceConsume.cs @@ -83,7 +83,7 @@ private static void ProduceConsume(string bootstrapServers, string schemaRegistr favorite_number = i, favorite_color = "blue" }; - + producer .ProduceAsync(topic, new Message { Key = user.name, Value = user }) .Wait(); diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceConsumeGeneric.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceConsumeGeneric.cs index 79d9e99d0..1baec67d5 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceConsumeGeneric.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceConsumeGeneric.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -89,7 +89,7 @@ private static void ProduceConsumeGeneric(string bootstrapServers, string schema favorite_number = 47, favorite_color = "orange" }; - + p.ProduceAsync(topic, new Message { Key = user }).Wait(); } @@ -182,7 +182,7 @@ record = consumer.Consume(new CancellationTokenSource(TimeSpan.FromSeconds(10)). } } } - + /// /// Test that messages produced with the Avro serializer can be consumed with the /// Avro deserializer (topic name strategy). 
diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceGenericMultipleTopics.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceGenericMultipleTopics.cs index 0e56eb147..9c686ce41 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceGenericMultipleTopics.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceGenericMultipleTopics.cs @@ -1,4 +1,4 @@ -using System; +using System; using Confluent.Kafka; using Avro; using Avro.Generic; diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceIncompatibleTypes.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceIncompatibleTypes.cs index f9232b8a2..19c5c6ef2 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceIncompatibleTypes.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/ProduceIncompatibleTypes.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -90,7 +90,7 @@ public static void ProduceIncompatibleTypes(string bootstrapServers, string sche .SetKeySerializer(new AvroSerializer(schemaRegistry)) .SetValueSerializer(new AvroSerializer(schemaRegistry)) .Build()) - { + { Assert.Throws(() => { try diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/SyncOverAsync.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/SyncOverAsync.cs index 0cdf6fd2e..1b15394a8 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/SyncOverAsync.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Avro/SyncOverAsync.cs @@ -39,7 +39,7 @@ public static void SyncOverAsync(string bootstrapServers, string schemaRegistryS { ThreadPool.GetMaxThreads(out int originalWorkerThreads, out int originalCompletionPortThreads); - ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); + ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); ThreadPool.SetMaxThreads(workerThreads, completionPortThreads); ThreadPool.GetMaxThreads(out workerThreads, out completionPortThreads); @@ -47,7 +47,7 @@ public static void SyncOverAsync(string bootstrapServers, string schemaRegistryS { BootstrapServers = bootstrapServers }; - + var schemaRegistryConfig = new SchemaRegistryConfig { Url = schemaRegistryServers @@ -63,8 +63,8 @@ public static void SyncOverAsync(string bootstrapServers, string schemaRegistryS // will deadlock if N >= workerThreads. Set to max number that // should not deadlock. 
- int N = workerThreads-1; - for (int i=0; i actionCreator = (taskNumber) => { @@ -72,7 +72,7 @@ public static void SyncOverAsync(string bootstrapServers, string schemaRegistryS { object waitObj = new object(); - Action> handler = dr => + Action> handler = dr => { Assert.True(dr.Error.Code == ErrorCode.NoError); diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/ProduceConsumeMixedJson.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/ProduceConsumeMixedJson.cs index 22226de38..465f6a888 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/ProduceConsumeMixedJson.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/ProduceConsumeMixedJson.cs @@ -93,7 +93,8 @@ public static void ProduceConsumeMixedJson(string bootstrapServers, string schem LastName = "User", NumberWithRange = 7 // range should be between 2 and 5. }; - Assert.Throws>(() => { + Assert.Throws>(() => + { try { producer.ProduceAsync(topic.Name, new Message { Key = "test1", Value = p }).Wait(); @@ -114,7 +115,8 @@ public static void ProduceConsumeMixedJson(string bootstrapServers, string schem // Omit LastName NumberWithRange = 3 }; - Assert.Throws(() => { + Assert.Throws(() => + { producer.ProduceAsync(topic.Name, new Message { Key = "test1", Value = p }).Wait(); }); } diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/UseLatestVersionEnabled.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/UseLatestVersionEnabled.cs index 01e91a3d2..b63a3802f 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/UseLatestVersionEnabled.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/UseLatestVersionEnabled.cs @@ -43,13 +43,13 @@ public static partial class Tests /// Test Use Latest Version on when AutoRegister enabled and disabled. 
/// [Theory, MemberData(nameof(TestParameters))] - public static void UseLatestVersionCheck(string bootstrapServers, string schemaRegistryServers) + public static void UseLatestVersionCheck(string bootstrapServers, string schemaRegistryServers) { var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; var schemaRegistryConfig = new SchemaRegistryConfig { Url = schemaRegistryServers }; using (var topic = new TemporaryTopic(bootstrapServers, 1)) - using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) + using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) { using (var producer = new ProducerBuilder(producerConfig) @@ -60,7 +60,7 @@ public static void UseLatestVersionCheck(string bootstrapServers, string schemaR producer.ProduceAsync(topic.Name, new Message { Key = "test1", Value = c }).Wait(); } - using (var producer = + using (var producer = new ProducerBuilder(producerConfig) .SetValueSerializer(new JsonSerializer( schemaRegistry, new JsonSerializerConfig { UseLatestVersion = true, AutoRegisterSchemas = false, LatestCompatibilityStrict = true })) diff --git a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/UseReferences.cs b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/UseReferences.cs index fa5047cce..e75f39115 100644 --- a/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/UseReferences.cs +++ b/test/Confluent.SchemaRegistry.Serdes.IntegrationTests/Tests_Json/UseReferences.cs @@ -31,29 +31,29 @@ public static partial class Tests { class Order { - public DateTime OrderDate {get; set;} - - public OrderDetails OrderDetails {get; set;} + public DateTime OrderDate { get; set; } + + public OrderDetails OrderDetails { get; set; } } - + class OrderDetails { - public int Id {get; set;} - - public Customer Customer {get; set;} - - public string PaymentId {get; set;} + public int Id { get; set; } + + public Customer Customer { get; set; } + + public string PaymentId { get; set; } } - + class Customer { - public int Id {get; set;} + public int Id { get; set; } + + public string Name { get; set; } - public string Name {get; set;} - - public string Email {get; set;} + public string Email { get; set; } } - + private static string Schema1 = @" { ""$schema"": ""http://json-schema.org/draft-07/schema#"", @@ -185,7 +185,7 @@ public static void UseReferences(string bootstrapServers, string schemaRegistryS }, OrderDate = DateTime.UtcNow }; - + using (var producer = new ProducerBuilder(producerConfig) .SetValueSerializer(new JsonSerializer(schemaRegistry, s2.Schema, @@ -194,7 +194,7 @@ public static void UseReferences(string bootstrapServers, string schemaRegistryS { producer.ProduceAsync(topic.Name, new Message { Key = "test1", Value = order }).Wait(); } - + using (var consumer = new ConsumerBuilder(consumerConfig) .SetValueDeserializer(new JsonDeserializer(sr, s2.Schema, @@ -211,7 +211,7 @@ public static void UseReferences(string bootstrapServers, string schemaRegistryS var serializedString = Newtonsoft.Json.JsonConvert.SerializeObject(order, jsonSchemaGeneratorSettings.ActualSerializerSettings); var jsonObject = JObject.Parse(serializedString); - + using (var producer = new ProducerBuilder(producerConfig) .SetValueSerializer(new JsonSerializer(schemaRegistry, s2.Schema, @@ -220,7 +220,7 @@ public static void UseReferences(string bootstrapServers, string schemaRegistryS { producer.ProduceAsync(topic.Name, new Message { Key = "test1", Value = jsonObject }).Wait(); } - + using 
(var consumer = new ConsumerBuilder(consumerConfig) .SetValueDeserializer(new JsonDeserializer(sr, s2.Schema, diff --git a/test/Confluent.SchemaRegistry.Serdes.UnitTests/BaseSerializeDeserialize.cs b/test/Confluent.SchemaRegistry.Serdes.UnitTests/BaseSerializeDeserialize.cs index 503e70efe..0d0a573a2 100644 --- a/test/Confluent.SchemaRegistry.Serdes.UnitTests/BaseSerializeDeserialize.cs +++ b/test/Confluent.SchemaRegistry.Serdes.UnitTests/BaseSerializeDeserialize.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -71,14 +71,14 @@ public BaseSerializeDeserializeTests() (string subject, IDictionary metadata, bool ignoreDeleted) => { return subjectStore[subject].First(x => - x.Metadata != null - && x.Metadata.Properties != null + x.Metadata != null + && x.Metadata.Properties != null && metadata.Keys.All(k => x.Metadata.Properties.ContainsKey(k) && x.Metadata.Properties[k] == metadata[k]) ); } ); schemaRegistryClient = schemaRegistryMock.Object; - + var dekRegistryMock = new Mock(); dekRegistryMock.Setup(x => x.CreateKekAsync(It.IsAny())).ReturnsAsync( (Kek kek) => @@ -153,7 +153,7 @@ public BaseSerializeDeserializeTests() return dekStore.TryGetValue(dekId, out RegisteredDek registeredDek) ? registeredDek : null; }); dekRegistryClient = dekRegistryMock.Object; - + var clockMock = new Mock(); clockMock.Setup(x => x.NowToUnixTimeMilliseconds()).Returns(() => now); clock = clockMock.Object; diff --git a/test/Confluent.SchemaRegistry.Serdes.UnitTests/BuiltinFunctions.cs b/test/Confluent.SchemaRegistry.Serdes.UnitTests/BuiltinFunctions.cs index b112606e4..e322133ed 100644 --- a/test/Confluent.SchemaRegistry.Serdes.UnitTests/BuiltinFunctions.cs +++ b/test/Confluent.SchemaRegistry.Serdes.UnitTests/BuiltinFunctions.cs @@ -126,4 +126,4 @@ public void UuidSuccess() { Assert.True(BuiltinOverload.ValidateUuid("fa02a430-892f-4160-97cd-6e3d1bc14494")); } -} \ No newline at end of file +} diff --git a/test/Confluent.SchemaRegistry.Serdes.UnitTests/Configuration.cs b/test/Confluent.SchemaRegistry.Serdes.UnitTests/Configuration.cs index 4bd253e35..c3adff3f8 100644 --- a/test/Confluent.SchemaRegistry.Serdes.UnitTests/Configuration.cs +++ b/test/Confluent.SchemaRegistry.Serdes.UnitTests/Configuration.cs @@ -1,4 +1,4 @@ -// Copyright 2018-2020 Confluent Inc. +// Copyright 2018-2020 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -109,7 +109,7 @@ public void DeserializerUnexpectedAvroConfigParam() { "avro.serializer.auto.register.schemas", "false" } }; - Assert.Throws(() => { var avroDeserializer = new AvroDeserializer(null, config); }); + Assert.Throws(() => { var avroDeserializer = new AvroDeserializer(null, config); }); } } } diff --git a/test/Confluent.SchemaRegistry.Serdes.UnitTests/JsonSerializeDeserialize.cs b/test/Confluent.SchemaRegistry.Serdes.UnitTests/JsonSerializeDeserialize.cs index c5327be09..316145d30 100644 --- a/test/Confluent.SchemaRegistry.Serdes.UnitTests/JsonSerializeDeserialize.cs +++ b/test/Confluent.SchemaRegistry.Serdes.UnitTests/JsonSerializeDeserialize.cs @@ -69,9 +69,9 @@ public class JsonSerializeDeserializeTests : BaseSerializeDeserializeTests public class Schema1 { public string Field1 { get; set; } - + public int Field2 { get; set; } - + public bool Field3 { get; set; } } @@ -218,15 +218,15 @@ public async Task WithJsonSchemaExternalReferencesAsync() var refs = new List { new SchemaReference("schema2.json", subject2, 1) }; var registeredSchema1 = new RegisteredSchema(subject1, 1, 2, schema1, SchemaType.Json, refs); store[schema1] = 2; - subjectStore[subject1] = new List { registeredSchema1 }; - + subjectStore[subject1] = new List { registeredSchema1 }; + var jsonSerializerConfig = new JsonSerializerConfig { UseLatestVersion = true, AutoRegisterSchemas = false, SubjectNameStrategy = SubjectNameStrategy.TopicRecord }; - + var jsonSchemaGeneratorSettings = new JsonSchemaGeneratorSettings { SerializerSettings = new JsonSerializerSettings @@ -237,7 +237,7 @@ public async Task WithJsonSchemaExternalReferencesAsync() } } }; - + var jsonSerializer = new JsonSerializer(schemaRegistryClient, registeredSchema1, jsonSerializerConfig, jsonSchemaGeneratorSettings); var jsonDeserializer = new JsonDeserializer(schemaRegistryClient, registeredSchema1); @@ -358,12 +358,12 @@ public void CELCondition() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, "message.name == 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new JsonSerializerConfig { AutoRegisterSchemas = false, @@ -409,12 +409,12 @@ public void CELConditionFail() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, "message.name != 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new JsonSerializerConfig { AutoRegisterSchemas = false, @@ -454,12 +454,12 @@ public void CELFieldTransform() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Transform, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Transform, RuleMode.Write, "CEL_FIELD", null, null, "typeName == 'STRING' ; value + '-suffix'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new JsonSerializerConfig { AutoRegisterSchemas = false, @@ -505,12 +505,12 @@ public void CELFieldCondition() schema.RuleSet = new RuleSet(new List(), new List { - new 
Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, "name == 'name' ; value == 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new JsonSerializerConfig { AutoRegisterSchemas = false, @@ -556,12 +556,12 @@ public void CELFieldConditionFail() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, "name == 'name' ; value != 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new JsonSerializerConfig { AutoRegisterSchemas = false, @@ -601,10 +601,10 @@ public void FieldEncryption() var schema = new RegisteredSchema("topic-value", 1, 1, schemaStr, SchemaType.Json, null); schema.Metadata = new Metadata(new Dictionary> - { - ["$.name"] = new HashSet { "PII" } + { + ["$.name"] = new HashSet { "PII" } - }, new Dictionary(), new HashSet() + }, new Dictionary(), new HashSet() ); schema.RuleSet = new RuleSet(new List(), new List @@ -859,7 +859,7 @@ class Customer [JsonProperty("name")] public string Name { get; set; } } - + class NewCustomer { [JsonProperty("favorite_color")] @@ -869,7 +869,7 @@ class NewCustomer [JsonProperty("full_name")] public string FullName { get; set; } } - + class NewerCustomer { [JsonProperty("favorite_color")] diff --git a/test/Confluent.SchemaRegistry.Serdes.UnitTests/ProtoSerializeDeserialize.cs b/test/Confluent.SchemaRegistry.Serdes.UnitTests/ProtoSerializeDeserialize.cs index 14d66fc3b..3eaa44269 100644 --- a/test/Confluent.SchemaRegistry.Serdes.UnitTests/ProtoSerializeDeserialize.cs +++ b/test/Confluent.SchemaRegistry.Serdes.UnitTests/ProtoSerializeDeserialize.cs @@ -50,7 +50,7 @@ message ReferrerMessage { ReferencedMessage ref = 2 [(.confluent.field_meta).annotation = ""PII""]; }"; - + string import = @"syntax = ""proto3""; package io.confluent.kafka.serializers.protobuf.test; @@ -112,12 +112,12 @@ message Person { schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, "message.name == 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new ProtobufSerializerConfig { AutoRegisterSchemas = false, @@ -159,12 +159,12 @@ message Person { schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, "message.name != 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new ProtobufSerializerConfig { AutoRegisterSchemas = false, @@ -200,12 +200,12 @@ message Person { schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Transform, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Transform, RuleMode.Write, "CEL_FIELD", null, null, "typeName == 'STRING' ; value + '-suffix'", 
null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new ProtobufSerializerConfig { AutoRegisterSchemas = false, @@ -247,12 +247,12 @@ message Person { schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, "name == 'name' ; value == 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new ProtobufSerializerConfig { AutoRegisterSchemas = false, @@ -294,12 +294,12 @@ message Person { schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, "name == 'name' ; value != 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new ProtobufSerializerConfig { AutoRegisterSchemas = false, @@ -332,14 +332,14 @@ message PersonWithPic { string name = 3 [(.confluent.field_meta) = { tags: ""PII"" }]; bytes picture = 4 [(.confluent.field_meta) = { tags: ""PII"" }]; }"; - + var schema = new RegisteredSchema("topic-value", 1, 1, schemaStr, SchemaType.Protobuf, null); schema.Metadata = new Metadata(new Dictionary> - { - ["example.PersonWithPic.name"] = new HashSet { "PII" }, - ["example.PersonWithPic.picture"] = new HashSet { "PII" } + { + ["example.PersonWithPic.name"] = new HashSet { "PII" }, + ["example.PersonWithPic.picture"] = new HashSet { "PII" } - }, new Dictionary(), new HashSet() + }, new Dictionary(), new HashSet() ); schema.RuleSet = new RuleSet(new List(), new List @@ -356,7 +356,7 @@ message PersonWithPic { } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new ProtobufSerializerConfig { AutoRegisterSchemas = false, @@ -364,8 +364,8 @@ message PersonWithPic { }; config.Set("rules.secret", "mysecret"); IRuleExecutor ruleExecutor = new FieldEncryptionExecutor(dekRegistryClient, clock); - var serializer = new ProtobufSerializer(schemaRegistryClient, config, new List{ ruleExecutor}); - var deserializer = new ProtobufDeserializer(schemaRegistryClient, null, new List{ ruleExecutor}); + var serializer = new ProtobufSerializer(schemaRegistryClient, config, new List { ruleExecutor }); + var deserializer = new ProtobufDeserializer(schemaRegistryClient, null, new List { ruleExecutor }); var pic = new byte[] { 1, 2, 3 }; diff --git a/test/Confluent.SchemaRegistry.Serdes.UnitTests/SerializeDeserialize.cs b/test/Confluent.SchemaRegistry.Serdes.UnitTests/SerializeDeserialize.cs index c4dc07dbb..ba9a31a0a 100644 --- a/test/Confluent.SchemaRegistry.Serdes.UnitTests/SerializeDeserialize.cs +++ b/test/Confluent.SchemaRegistry.Serdes.UnitTests/SerializeDeserialize.cs @@ -1,4 +1,4 @@ -// Copyright 2018 Confluent Inc. +// Copyright 2018 Confluent Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -136,7 +136,7 @@ public void ISpecificRecord() Assert.Equal(user.name, result.name); Assert.Equal(user.favorite_color, result.favorite_color); Assert.Equal(user.favorite_number, result.favorite_number); - + // serialize second object user = new User { @@ -161,12 +161,12 @@ public void ISpecificRecordCELCondition() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, "message.name == 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -199,12 +199,12 @@ public void ISpecificRecordCELConditionFail() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, "message.name != 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -231,12 +231,12 @@ public void ISpecificRecordCELFieldTransform() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Transform, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Transform, RuleMode.Write, "CEL_FIELD", null, null, "typeName == 'STRING' ; value + '-suffix'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -269,12 +269,12 @@ public void ISpecificRecordCELFieldCondition() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, "name == 'name' ; value == 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -307,12 +307,12 @@ public void ISpecificRecordCELFieldConditionFail() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, "name == 'name' ; value != 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -341,11 +341,11 @@ public void ISpecificRecordFieldEncryption() var schema = new RegisteredSchema("topic-value", 1, 1, schemaStr, SchemaType.Avro, null); schema.Metadata = new Metadata(new Dictionary> - { - ["Confluent.Kafka.Examples.AvroSpecific.UserWithPic.name"] = new HashSet { "PII" }, - ["Confluent.Kafka.Examples.AvroSpecific.UserWithPic.picture"] = new HashSet { "PII" } + { + ["Confluent.Kafka.Examples.AvroSpecific.UserWithPic.name"] = new HashSet { "PII" }, + ["Confluent.Kafka.Examples.AvroSpecific.UserWithPic.picture"] = new HashSet { "PII" } - }, new Dictionary(), new HashSet() + }, new 
Dictionary(), new HashSet() ); schema.RuleSet = new RuleSet(new List(), new List @@ -370,8 +370,8 @@ public void ISpecificRecordFieldEncryption() }; config.Set("rules.secret", "mysecret"); IRuleExecutor ruleExecutor = new FieldEncryptionExecutor(dekRegistryClient, clock); - var serializer = new AvroSerializer(schemaRegistryClient, config, new List{ ruleExecutor}); - var deserializer = new AvroDeserializer(schemaRegistryClient, null, new List{ ruleExecutor}); + var serializer = new AvroSerializer(schemaRegistryClient, config, new List { ruleExecutor }); + var deserializer = new AvroDeserializer(schemaRegistryClient, null, new List { ruleExecutor }); var pic = new byte[] { 1, 2, 3 }; var user = new UserWithPic() @@ -403,10 +403,10 @@ public void ISpecificRecordFieldEncryptionDekRotation() var schema = new RegisteredSchema("topic-value", 1, 1, schemaStr, SchemaType.Avro, null); schema.Metadata = new Metadata(new Dictionary> - { - ["Confluent.Kafka.Examples.AvroSpecific.User.name"] = new HashSet { "PII" }, + { + ["Confluent.Kafka.Examples.AvroSpecific.User.name"] = new HashSet { "PII" }, - }, new Dictionary(), new HashSet() + }, new Dictionary(), new HashSet() ); schema.RuleSet = new RuleSet(new List(), new List @@ -424,7 +424,7 @@ public void ISpecificRecordFieldEncryptionDekRotation() } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -432,8 +432,8 @@ public void ISpecificRecordFieldEncryptionDekRotation() }; config.Set("rules.secret", "mysecret"); IRuleExecutor ruleExecutor = new FieldEncryptionExecutor(dekRegistryClient, clock); - var serializer = new AvroSerializer(schemaRegistryClient, config, new List{ ruleExecutor}); - var deserializer = new AvroDeserializer(schemaRegistryClient, null, new List{ ruleExecutor}); + var serializer = new AvroSerializer(schemaRegistryClient, config, new List { ruleExecutor }); + var deserializer = new AvroDeserializer(schemaRegistryClient, null, new List { ruleExecutor }); var user = new User() { @@ -513,7 +513,7 @@ public void ISpecificRecordJSONataFullyCompatible() schema.Metadata = new Metadata(null, new Dictionary { { "application.version", "1"} - + }, new HashSet() ); store[schemaStr] = 1; @@ -521,12 +521,12 @@ public void ISpecificRecordJSONataFullyCompatible() { AutoRegisterSchemas = false, UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "1"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "1" } } }; var deserConfig1 = new AvroDeserializerConfig { UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "1"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "1" } } }; var serializer1 = new AvroSerializer(schemaRegistryClient, config1); var deserializer1 = new AvroDeserializer(schemaRegistryClient, deserConfig1); @@ -543,15 +543,15 @@ public void ISpecificRecordJSONataFullyCompatible() newSchema.Metadata = new Metadata(null, new Dictionary { { "application.version", "2"} - + }, new HashSet() ); newSchema.RuleSet = new RuleSet( new List { - new Rule("myRule1", RuleKind.Transform, RuleMode.Upgrade, "JSONATA", null, + new Rule("myRule1", RuleKind.Transform, RuleMode.Upgrade, "JSONATA", null, null, rule1To2, null, null, false), - new Rule("myRule2", RuleKind.Transform, RuleMode.Downgrade, "JSONATA", null, + new Rule("myRule2", RuleKind.Transform, RuleMode.Downgrade, "JSONATA", null, 
null, rule2To1, null, null, false) }, new List() ); @@ -559,12 +559,12 @@ public void ISpecificRecordJSONataFullyCompatible() { AutoRegisterSchemas = false, UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "2"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "2" } } }; var deserConfig2 = new AvroDeserializerConfig { UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "2"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "2" } } }; var serializer2 = new AvroSerializer(schemaRegistryClient, config2); var deserializer2 = new AvroDeserializer(schemaRegistryClient, deserConfig2); @@ -581,15 +581,15 @@ public void ISpecificRecordJSONataFullyCompatible() newerSchema.Metadata = new Metadata(null, new Dictionary { { "application.version", "3"} - + }, new HashSet() ); newerSchema.RuleSet = new RuleSet( new List { - new Rule("myRule1", RuleKind.Transform, RuleMode.Upgrade, "JSONATA", null, + new Rule("myRule1", RuleKind.Transform, RuleMode.Upgrade, "JSONATA", null, null, rule2To3, null, null, false), - new Rule("myRule2", RuleKind.Transform, RuleMode.Downgrade, "JSONATA", null, + new Rule("myRule2", RuleKind.Transform, RuleMode.Downgrade, "JSONATA", null, null, rule3To2, null, null, false) }, new List() ); @@ -597,12 +597,12 @@ public void ISpecificRecordJSONataFullyCompatible() { AutoRegisterSchemas = false, UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "3"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "3" } } }; var deserConfig3 = new AvroDeserializerConfig { UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "3"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "3" } } }; var serializer3 = new AvroSerializer(schemaRegistryClient, config3); var deserializer3 = new AvroDeserializer(schemaRegistryClient, deserConfig3); @@ -613,25 +613,25 @@ public void ISpecificRecordJSONataFullyCompatible() favorite_number = 100, title = "awesome" }; - + store[schemaStr] = 1; store[newSchemaStr] = 2; store[newerSchemaStr] = 3; - subjectStore["topic-value"] = new List { schema, newSchema, newerSchema }; + subjectStore["topic-value"] = new List { schema, newSchema, newerSchema }; Headers headers = new Headers(); var bytes = serializer1.SerializeAsync(user, new SerializationContext(MessageComponentType.Value, testTopic, headers)).Result; ISpecificRecordDeserializeAllVersions(deserializer1, deserializer2, deserializer3, bytes, headers, user); - + bytes = serializer2.SerializeAsync(newUser, new SerializationContext(MessageComponentType.Value, testTopic, headers)).Result; ISpecificRecordDeserializeAllVersions(deserializer1, deserializer2, deserializer3, bytes, headers, user); - + bytes = serializer3.SerializeAsync(newerUser, new SerializationContext(MessageComponentType.Value, testTopic, headers)).Result; ISpecificRecordDeserializeAllVersions(deserializer1, deserializer2, deserializer3, bytes, headers, user); } - private void ISpecificRecordDeserializeAllVersions(AvroDeserializer deserializer1, - AvroDeserializer deserializer2, AvroDeserializer deserializer3, + private void ISpecificRecordDeserializeAllVersions(AvroDeserializer deserializer1, + AvroDeserializer deserializer2, AvroDeserializer deserializer3, byte[] bytes, Headers headers, User user) { var result1 = deserializer1.DeserializeAsync(bytes, false, new SerializationContext(MessageComponentType.Value, 
testTopic, headers)).Result; @@ -657,7 +657,7 @@ public void GenericRecord() var serializer = new AvroSerializer(schemaRegistryClient, null); var deserializer = new AvroDeserializer(schemaRegistryClient, null); - var user = new GenericRecord((RecordSchema) User._SCHEMA); + var user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "awesome"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -669,9 +669,9 @@ public void GenericRecord() Assert.Equal(user["name"], result["name"]); Assert.Equal(user["favorite_color"], result["favorite_color"]); Assert.Equal(user["favorite_number"], result["favorite_number"]); - + // serialize second object - user = new GenericRecord((RecordSchema) User._SCHEMA); + user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "cool"); user.Add("favorite_number", 100); user.Add("favorite_color", "red"); @@ -692,12 +692,12 @@ public void GenericRecordCELCondition() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, "message.name == 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -706,7 +706,7 @@ public void GenericRecordCELCondition() var serializer = new AvroSerializer(schemaRegistryClient, config); var deserializer = new AvroDeserializer(schemaRegistryClient, null); - var user = new GenericRecord((RecordSchema) User._SCHEMA); + var user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "awesome"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -728,12 +728,12 @@ public void GenericRecordCELConditionFail() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL", null, null, "message.name != 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -741,7 +741,7 @@ public void GenericRecordCELConditionFail() }; var serializer = new AvroSerializer(schemaRegistryClient, config); - var user = new GenericRecord((RecordSchema) User._SCHEMA); + var user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "awesome"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -772,7 +772,7 @@ public void GenericRecordCELConditionEmail() var serializer = new AvroSerializer(schemaRegistryClient, config); var deserializer = new AvroDeserializer(schemaRegistryClient, null); - var user = new GenericRecord((RecordSchema) User._SCHEMA); + var user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "bob@confluent.com"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -807,7 +807,7 @@ public void GenericRecordCELConditionEmailFail() }; var serializer = new AvroSerializer(schemaRegistryClient, config); - var user = new GenericRecord((RecordSchema) User._SCHEMA); + var user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "awesome"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -824,12 +824,12 @@ public void 
GenericRecordCELFieldTransform() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Transform, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Transform, RuleMode.Write, "CEL_FIELD", null, null, "typeName == 'STRING' ; value + '-suffix'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -838,7 +838,7 @@ public void GenericRecordCELFieldTransform() var serializer = new AvroSerializer(schemaRegistryClient, config); var deserializer = new AvroDeserializer(schemaRegistryClient, null); - var user = new GenericRecord((RecordSchema) User._SCHEMA); + var user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "awesome"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -860,12 +860,12 @@ public void GenericRecordCELFieldCondition() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, "name == 'name' ; value == 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -874,7 +874,7 @@ public void GenericRecordCELFieldCondition() var serializer = new AvroSerializer(schemaRegistryClient, config); var deserializer = new AvroDeserializer(schemaRegistryClient, null); - var user = new GenericRecord((RecordSchema) User._SCHEMA); + var user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "awesome"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -896,12 +896,12 @@ public void GenericRecordCELFieldConditionFail() schema.RuleSet = new RuleSet(new List(), new List { - new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, + new Rule("testCEL", RuleKind.Condition, RuleMode.Write, "CEL_FIELD", null, null, "name == 'name' ; value != 'awesome'", null, null, false) } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -909,7 +909,7 @@ public void GenericRecordCELFieldConditionFail() }; var serializer = new AvroSerializer(schemaRegistryClient, config); - var user = new GenericRecord((RecordSchema) User._SCHEMA); + var user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "awesome"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -928,11 +928,11 @@ public void GenericRecordFieldEncryption() var schema = new RegisteredSchema("topic-value", 1, 1, schemaStr, SchemaType.Avro, null); schema.Metadata = new Metadata(new Dictionary> - { - ["Confluent.Kafka.Examples.AvroSpecific.UserWithPic.name"] = new HashSet { "PII" }, - ["Confluent.Kafka.Examples.AvroSpecific.UserWithPic.picture"] = new HashSet { "PII" } + { + ["Confluent.Kafka.Examples.AvroSpecific.UserWithPic.name"] = new HashSet { "PII" }, + ["Confluent.Kafka.Examples.AvroSpecific.UserWithPic.picture"] = new HashSet { "PII" } - }, new Dictionary(), new HashSet() + }, new Dictionary(), new HashSet() ); schema.RuleSet = new RuleSet(new List(), new List @@ -949,7 +949,7 @@ public void 
GenericRecordFieldEncryption() } ); store[schemaStr] = 1; - subjectStore["topic-value"] = new List { schema }; + subjectStore["topic-value"] = new List { schema }; var config = new AvroSerializerConfig { AutoRegisterSchemas = false, @@ -957,11 +957,11 @@ public void GenericRecordFieldEncryption() }; config.Set("rules.secret", "mysecret"); IRuleExecutor ruleExecutor = new FieldEncryptionExecutor(dekRegistryClient, clock); - var serializer = new AvroSerializer(schemaRegistryClient, config, new List{ ruleExecutor}); - var deserializer = new AvroDeserializer(schemaRegistryClient, null, new List{ ruleExecutor}); + var serializer = new AvroSerializer(schemaRegistryClient, config, new List { ruleExecutor }); + var deserializer = new AvroDeserializer(schemaRegistryClient, null, new List { ruleExecutor }); var pic = new byte[] { 1, 2, 3 }; - var user = new GenericRecord((RecordSchema) UserWithPic._SCHEMA); + var user = new GenericRecord((RecordSchema)UserWithPic._SCHEMA); user.Add("name", "awesome"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -990,7 +990,7 @@ public void GenericRecordJSONataFullyCompatible() schema.Metadata = new Metadata(null, new Dictionary { { "application.version", "1"} - + }, new HashSet() ); store[schemaStr] = 1; @@ -998,17 +998,17 @@ public void GenericRecordJSONataFullyCompatible() { AutoRegisterSchemas = false, UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "1"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "1" } } }; var deserConfig1 = new AvroDeserializerConfig { UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "1"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "1" } } }; var serializer1 = new AvroSerializer(schemaRegistryClient, config1); var deserializer1 = new AvroDeserializer(schemaRegistryClient, deserConfig1); - var user = new GenericRecord((RecordSchema) User._SCHEMA); + var user = new GenericRecord((RecordSchema)User._SCHEMA); user.Add("name", "awesome"); user.Add("favorite_number", 100); user.Add("favorite_color", "blue"); @@ -1018,15 +1018,15 @@ public void GenericRecordJSONataFullyCompatible() newSchema.Metadata = new Metadata(null, new Dictionary { { "application.version", "2"} - + }, new HashSet() ); newSchema.RuleSet = new RuleSet( new List { - new Rule("myRule1", RuleKind.Transform, RuleMode.Upgrade, "JSONATA", null, + new Rule("myRule1", RuleKind.Transform, RuleMode.Upgrade, "JSONATA", null, null, rule1To2, null, null, false), - new Rule("myRule2", RuleKind.Transform, RuleMode.Downgrade, "JSONATA", null, + new Rule("myRule2", RuleKind.Transform, RuleMode.Downgrade, "JSONATA", null, null, rule2To1, null, null, false) }, new List() ); @@ -1034,17 +1034,17 @@ public void GenericRecordJSONataFullyCompatible() { AutoRegisterSchemas = false, UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "2"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "2" } } }; var deserConfig2 = new AvroDeserializerConfig { UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "2"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "2" } } }; var serializer2 = new AvroSerializer(schemaRegistryClient, config2); var deserializer2 = new AvroDeserializer(schemaRegistryClient, deserConfig2); - var newUser = new GenericRecord((RecordSchema) NewUser._SCHEMA); + var newUser = new 
GenericRecord((RecordSchema)NewUser._SCHEMA); newUser.Add("full_name", "awesome"); newUser.Add("favorite_number", 100); newUser.Add("favorite_color", "blue"); @@ -1054,15 +1054,15 @@ public void GenericRecordJSONataFullyCompatible() newerSchema.Metadata = new Metadata(null, new Dictionary { { "application.version", "3"} - + }, new HashSet() ); newerSchema.RuleSet = new RuleSet( new List { - new Rule("myRule1", RuleKind.Transform, RuleMode.Upgrade, "JSONATA", null, + new Rule("myRule1", RuleKind.Transform, RuleMode.Upgrade, "JSONATA", null, null, rule2To3, null, null, false), - new Rule("myRule2", RuleKind.Transform, RuleMode.Downgrade, "JSONATA", null, + new Rule("myRule2", RuleKind.Transform, RuleMode.Downgrade, "JSONATA", null, null, rule3To2, null, null, false) }, new List() ); @@ -1070,17 +1070,17 @@ public void GenericRecordJSONataFullyCompatible() { AutoRegisterSchemas = false, UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "3"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "3" } } }; var deserConfig3 = new AvroDeserializerConfig { UseLatestVersion = false, - UseLatestWithMetadata = new Dictionary{ { "application.version", "3"} } + UseLatestWithMetadata = new Dictionary { { "application.version", "3" } } }; var serializer3 = new AvroSerializer(schemaRegistryClient, config3); var deserializer3 = new AvroDeserializer(schemaRegistryClient, deserConfig3); - var newerUser = new GenericRecord((RecordSchema) NewerUser._SCHEMA); + var newerUser = new GenericRecord((RecordSchema)NewerUser._SCHEMA); newerUser.Add("title", "awesome"); newerUser.Add("favorite_number", 100); newerUser.Add("favorite_color", "blue"); @@ -1088,21 +1088,21 @@ public void GenericRecordJSONataFullyCompatible() store[schemaStr] = 1; store[newSchemaStr] = 2; store[newerSchemaStr] = 3; - subjectStore["topic-value"] = new List { schema, newSchema, newerSchema }; + subjectStore["topic-value"] = new List { schema, newSchema, newerSchema }; Headers headers = new Headers(); var bytes = serializer1.SerializeAsync(user, new SerializationContext(MessageComponentType.Value, testTopic, headers)).Result; GenericRecordDeserializeAllVersions(deserializer1, deserializer2, deserializer3, bytes, headers, user); - + bytes = serializer2.SerializeAsync(newUser, new SerializationContext(MessageComponentType.Value, testTopic, headers)).Result; GenericRecordDeserializeAllVersions(deserializer1, deserializer2, deserializer3, bytes, headers, user); - + bytes = serializer3.SerializeAsync(newerUser, new SerializationContext(MessageComponentType.Value, testTopic, headers)).Result; GenericRecordDeserializeAllVersions(deserializer1, deserializer2, deserializer3, bytes, headers, user); } - private void GenericRecordDeserializeAllVersions(AvroDeserializer deserializer1, - AvroDeserializer deserializer2, AvroDeserializer deserializer3, + private void GenericRecordDeserializeAllVersions(AvroDeserializer deserializer1, + AvroDeserializer deserializer2, AvroDeserializer deserializer3, byte[] bytes, Headers headers, GenericRecord user) { var result1 = deserializer1.DeserializeAsync(bytes, false, new SerializationContext(MessageComponentType.Value, testTopic, headers)).Result; diff --git a/test/Confluent.SchemaRegistry.Serdes.UnitTests/User2.cs b/test/Confluent.SchemaRegistry.Serdes.UnitTests/User2.cs index 47a6f1024..dea40ae60 100644 --- a/test/Confluent.SchemaRegistry.Serdes.UnitTests/User2.cs +++ b/test/Confluent.SchemaRegistry.Serdes.UnitTests/User2.cs @@ -1,4 +1,4 @@ -using Avro; 
+using Avro;
 using Avro.Specific;

 namespace Confluent.Kafka.Examples.AvroSpecific
diff --git a/test/Confluent.SchemaRegistry.UnitTests/RegisteredSchema.cs b/test/Confluent.SchemaRegistry.UnitTests/RegisteredSchema.cs
index 5f83e9db0..6be722b0d 100644
--- a/test/Confluent.SchemaRegistry.UnitTests/RegisteredSchema.cs
+++ b/test/Confluent.SchemaRegistry.UnitTests/RegisteredSchema.cs
@@ -1,4 +1,4 @@
-// Copyright 20 Confluent Inc.
+// Copyright 20 Confluent Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/test/Confluent.SchemaRegistry.UnitTests/WildcardMatcherTest.cs b/test/Confluent.SchemaRegistry.UnitTests/WildcardMatcherTest.cs
index cbdb36046..e7dc0f11a 100644
--- a/test/Confluent.SchemaRegistry.UnitTests/WildcardMatcherTest.cs
+++ b/test/Confluent.SchemaRegistry.UnitTests/WildcardMatcherTest.cs
@@ -54,4 +54,4 @@ public void Match()
             Assert.True(WildcardMatcher.Match("alice.bob.eve", "alice.bob**"));
         }
     }
-}
\ No newline at end of file
+}