import io.strimzi.operator.cluster.operator.resource.events.KubernetesRestartEventPublisher;
import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator;
import io.strimzi.operator.common.AdminClientProvider;
-import io.strimzi.operator.common.Annotations;
import io.strimzi.operator.common.BackOff;
import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.ReconciliationLogger;
@@ -209,19 +208,10 @@ private boolean maybeInitBrokerAdminClient() {
     * Initializes controllerAdminClient if it has not been initialized yet
     * @return true if the creation of AC succeeded, false otherwise
     */
-    private boolean maybeInitControllerAdminClient(String currentVersion) {
+    private boolean maybeInitControllerAdminClient() {
        if (this.controllerAdminClient == null) {
-            // Prior to 3.9.0, Kafka did not support directly connecting to controllers nodes
-            // via Kafka Admin API when running in KRaft mode.
-            // Therefore, use brokers to initialise adminClient for quorum health check
-            // when the version is older than 3.9.0.
            try {
-                if (KafkaVersion.compareDottedVersions(currentVersion, "3.9.0") >= 0) {
-                    this.controllerAdminClient = controllerAdminClient(nodes);
-                } else {
-                    this.controllerAdminClient = brokerAdminClient(Set.of());
-
-                }
+                this.controllerAdminClient = controllerAdminClient(nodes);
            } catch (ForceableProblem | FatalProblem e) {
                LOGGER.warnCr(reconciliation, "Failed to create controllerAdminClient.", e);
                return false;
@@ -455,11 +445,9 @@ private void restartIfNecessary(NodeRef nodeRef, RestartContext restartContext)
        // change and the desired roles still apply.
        boolean isBroker = Labels.booleanLabel(pod, Labels.STRIMZI_BROKER_ROLE_LABEL, nodeRef.broker());
        boolean isController = Labels.booleanLabel(pod, Labels.STRIMZI_CONTROLLER_ROLE_LABEL, nodeRef.controller());
-        // This is relevant when creating admin client for controllers
-        String currentVersion = Annotations.stringAnnotation(pod, KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, "0.0.0", null);

        try {
-            checkIfRestartOrReconfigureRequired(nodeRef, isController, isBroker, restartContext, currentVersion);
+            checkIfRestartOrReconfigureRequired(nodeRef, isController, isBroker, restartContext);
            if (restartContext.forceRestart) {
                LOGGER.debugCr(reconciliation, "Pod {} can be rolled now", nodeRef);
                restartAndAwaitReadiness(pod, operationTimeoutMs, TimeUnit.MILLISECONDS, restartContext);
@@ -589,7 +577,7 @@ private void markRestartContextWithForceRestart(RestartContext restartContext) {
     * Determine whether the pod should be restarted, or the broker reconfigured.
     */
    @SuppressWarnings("checkstyle:CyclomaticComplexity")
-    private void checkIfRestartOrReconfigureRequired(NodeRef nodeRef, boolean isController, boolean isBroker, RestartContext restartContext, String currentVersion) throws ForceableProblem, InterruptedException, FatalProblem, UnforceableProblem {
+    private void checkIfRestartOrReconfigureRequired(NodeRef nodeRef, boolean isController, boolean isBroker, RestartContext restartContext) throws ForceableProblem, InterruptedException, FatalProblem {
        RestartReasons reasonToRestartPod = restartContext.restartReasons;
        if (restartContext.podStuck && !reasonToRestartPod.contains(RestartReason.POD_HAS_OLD_REVISION)) {
            // If the pod is unschedulable then deleting it, or trying to open an Admin client to it will make no difference
@@ -612,10 +600,13 @@ private void checkIfRestartOrReconfigureRequired(NodeRef nodeRef, boolean isCont

        // if it is a pure controller, initialise the admin client specifically for controllers
        if (isController && !isBroker) {
-            if (!maybeInitControllerAdminClient(currentVersion)) {
-                handleFailedAdminClientForController(nodeRef, restartContext, reasonToRestartPod, currentVersion);
+            if (!maybeInitControllerAdminClient()) {
+                LOGGER.infoCr(reconciliation, "Pod {} needs to be restarted, because it does not seem to be responding to connection attempts", nodeRef);
+                reasonToRestartPod.add(RestartReason.POD_UNRESPONSIVE);
+                markRestartContextWithForceRestart(restartContext);
                return;
            }
+            LOGGER.debugCr(reconciliation, "Initialising KafkaQuorumCheck for controller pod {}", nodeRef);
            restartContext.quorumCheck = quorumCheck(controllerAdminClient, nodeRef);
        }

@@ -629,6 +620,7 @@ private void checkIfRestartOrReconfigureRequired(NodeRef nodeRef, boolean isCont

            // If it is a mixed node, initialise quorum check with the broker admin client
            if (isController) {
+                LOGGER.debugCr(reconciliation, "Initialising KafkaQuorumCheck for mixed roles pod {}", nodeRef);
                restartContext.quorumCheck = quorumCheck(brokerAdminClient, nodeRef);
            }
        }
@@ -637,7 +629,6 @@ private void checkIfRestartOrReconfigureRequired(NodeRef nodeRef, boolean isCont
        // connect to the node and that it's capable of responding.
        Config nodeConfig;
        try {
-            System.out.println("TINA Getting node config for " + nodeRef.nodeId());
            nodeConfig = nodeConfig(nodeRef, isController && !isBroker);
        } catch (ForceableProblem e) {
            if (restartContext.backOff.done()) {
@@ -678,21 +669,6 @@ private void checkIfRestartOrReconfigureRequired(NodeRef nodeRef, boolean isCont
        restartContext.nodeLoggingDiff = nodeLoggingDiff;
    }

-    private void handleFailedAdminClientForController(NodeRef nodeRef, RestartContext restartContext, RestartReasons reasonToRestartPod, String currentVersion) throws UnforceableProblem {
-        if (KafkaVersion.compareDottedVersions(currentVersion, "3.9.0") >= 0) {
-            // If the version supports talking to controllers, force restart this pod when the admin client cannot be initialised.
-            // There is no reason to continue as we won't be able to connect an admin client to this pod for other checks later.
-            LOGGER.infoCr(reconciliation, "KafkaQuorumCheck cannot be initialised for {} because none of the controllers do not seem to responding to connection attempts.", nodeRef);
-            reasonToRestartPod.add(RestartReason.POD_UNRESPONSIVE);
-            markRestartContextWithForceRestart(restartContext);
-        } else {
-            // If the version does not support talking to controllers, the admin client should be connecting to the broker nodes.
-            // Since connection to the brokers failed, throw an UnforceableProblem so that broker nodes can be checked later
-            // which may potentially resolve the connection issue.
-            throw new UnforceableProblem("KafkaQuorumCheck cannot be initialised for " + nodeRef + " because none of the brokers do not seem to responding to connection attempts");
-        }
-    }
-
    /**
     * Returns a config of the given broker.
     * @param nodeRef The reference of the broker.
@@ -701,13 +677,11 @@ private void handleFailedAdminClientForController(NodeRef nodeRef, RestartContex
    /* test */ Config nodeConfig(NodeRef nodeRef, boolean isPureController) throws ForceableProblem, InterruptedException {
        ConfigResource resource = new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(nodeRef.nodeId()));
        if (isPureController) {
-            System.out.println("Getting controller config");
            return await(VertxUtil.kafkaFutureToVertxFuture(reconciliation, vertx, controllerAdminClient.describeConfigs(singletonList(resource)).values().get(resource)),
                    30, TimeUnit.SECONDS,
                    error -> new ForceableProblem("Error getting controller config: " + error, error)
            );
        } else {
-            System.out.println("Getting broker config");
            return await(VertxUtil.kafkaFutureToVertxFuture(reconciliation, vertx, brokerAdminClient.describeConfigs(singletonList(resource)).values().get(resource)),
                    30, TimeUnit.SECONDS,
                    error -> new ForceableProblem("Error getting broker config: " + error, error)
@@ -926,15 +900,7 @@ protected Future<Void> restart(Pod pod, RestartContext restartContext) {
     * empty set, use the brokers service to bootstrap the client.
     */
    /* test */ Admin brokerAdminClient(Set<NodeRef> nodes) throws ForceableProblem, FatalProblem {
-        // If no nodes are passed, initialize the admin client using the bootstrap service
-        // This is still needed for versions older than 3.9.0, so that when only controller nodes being rolled,
-        // it can use brokers to get quorum information via AdminClient.
-        String bootstrapHostnames;
-        if (nodes.isEmpty()) {
-            bootstrapHostnames = String.format("%s:%s", DnsNameGenerator.of(namespace, KafkaResources.bootstrapServiceName(cluster)).serviceDnsName(), KafkaCluster.REPLICATION_PORT);
-        } else {
-            bootstrapHostnames = nodes.stream().filter(NodeRef::broker).map(node -> DnsNameGenerator.podDnsName(namespace, KafkaResources.brokersServiceName(cluster), node.podName()) + ":" + KafkaCluster.REPLICATION_PORT).collect(Collectors.joining(","));
-        }
+        String bootstrapHostnames = nodes.stream().filter(NodeRef::broker).map(node -> DnsNameGenerator.podDnsName(namespace, KafkaResources.brokersServiceName(cluster), node.podName()) + ":" + KafkaCluster.REPLICATION_PORT).collect(Collectors.joining(","));

        try {
            LOGGER.debugCr(reconciliation, "Creating AdminClient for {}", bootstrapHostnames);
0 commit comments