diff --git a/apis/workloads/v1/groupversion_info.go b/apis/workloads/v1/groupversion_info.go index 1fad4b7451a..0066e4b286e 100644 --- a/apis/workloads/v1/groupversion_info.go +++ b/apis/workloads/v1/groupversion_info.go @@ -38,4 +38,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -const InstanceSetKind = "InstanceSet" +const ( + InstanceSetKind = "InstanceSet" + InstanceKind = "Instance" +) diff --git a/apis/workloads/v1/instance_types.go b/apis/workloads/v1/instance_types.go index 92d82e852b1..778f2cde266 100644 --- a/apis/workloads/v1/instance_types.go +++ b/apis/workloads/v1/instance_types.go @@ -183,10 +183,25 @@ type InstanceStatus2 struct { // +optional Role string `json:"role,omitempty"` + // Represents whether the instance is provisioned. + // + // +optional + Provisioned bool `json:"provisioned,omitempty"` + + // Represents whether the instance data is loaded. + // + // +optional + DataLoaded *bool `json:"dataLoaded,omitempty"` + + // Represents whether the instance is joined the cluster. + // + // +optional + MemberJoined *bool `json:"memberJoined,omitempty"` + // Represents whether the instance is in volume expansion. // // +optional - VolumeExpansion bool `json:"volumeExpansion,omitempty"` + InVolumeExpansion bool `json:"inVolumeExpansion,omitempty"` } type InstanceAssistantObject struct { diff --git a/apis/workloads/v1/instanceset_types.go b/apis/workloads/v1/instanceset_types.go index 2008276d7b6..2a321fc9b39 100644 --- a/apis/workloads/v1/instanceset_types.go +++ b/apis/workloads/v1/instanceset_types.go @@ -24,6 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" ) @@ -328,13 +329,13 @@ type InstanceSetStatus struct { // This value is set to spec.Replicas at the time of object creation and remains constant thereafter. 
// // +optional - InitReplicas int32 `json:"initReplicas"` + InitReplicas *int32 `json:"initReplicas"` // Represents the number of instances that have already reached the InstanceStatus during the cluster initialization stage. // This value remains constant once it equals InitReplicas. // // +optional - ReadyInitReplicas int32 `json:"readyInitReplicas,omitempty"` + ReadyInitReplicas *int32 `json:"readyInitReplicas,omitempty"` // Provides the status of each instance in the ITS. // @@ -504,6 +505,21 @@ type LifecycleActions struct { // +optional Switchover *Action `json:"switchover,omitempty"` + // Defines the procedure to add a new replica. + // + // +optional + MemberJoin *Action `json:"memberJoin,omitempty"` + + // Defines the procedure to remove a replica. + // + // +optional + MemberLeave *Action `json:"memberLeave,omitempty"` + + // Defines the procedure for importing data into a replica. + // + // +optional + DataLoad *Action `json:"dataLoad,omitempty"` + // Defines the procedure that update a replica with new configuration. // // +optional @@ -552,10 +568,25 @@ type InstanceStatus struct { // +optional Configs []InstanceConfigStatus `json:"configs,omitempty"` + // Represents whether the instance is provisioned. + // + // +optional + Provisioned bool `json:"provisioned,omitempty"` + + // Represents whether the instance data is loaded. + // + // +optional + DataLoaded *bool `json:"dataLoaded,omitempty"` + + // Represents whether the instance is joined the cluster. + // + // +optional + MemberJoined *bool `json:"memberJoined,omitempty"` + // Represents whether the instance is in volume expansion. 
// // +optional - VolumeExpansion bool `json:"volumeExpansion,omitempty"` + InVolumeExpansion bool `json:"inVolumeExpansion,omitempty"` } type InstanceConfigStatus struct { @@ -647,7 +678,7 @@ func (r *InstanceSet) IsInstancesReady() bool { return false } // check whether the cluster has been initialized - if r.Status.ReadyInitReplicas != r.Status.InitReplicas { + if ptr.Deref(r.Status.InitReplicas, 0) == 0 || ptr.Deref(r.Status.InitReplicas, 0) != ptr.Deref(r.Status.ReadyInitReplicas, 0) { return false } // check whether latest spec has been sent to the underlying workload @@ -669,12 +700,20 @@ func (r *InstanceSet) IsInstancesReady() bool { return false } + // check whether all instances are joined the cluster + for _, inst := range r.Status.InstanceStatus { + if !ptr.Deref(inst.MemberJoined, true) { + return false + } + } + return true } // IsInstanceSetReady gives InstanceSet level 'ready' state: -// 1. all instances are available -// 2. and all instances have role set (if they are role-ful) +// 1. all instances are ready and available +// 2. all instances are joined the cluster +// 3. 
all instances have role set (if they are role-ful) func (r *InstanceSet) IsInstanceSetReady() bool { instancesReady := r.IsInstancesReady() if !instancesReady { @@ -696,3 +735,7 @@ func (r *InstanceSet) IsRoleProbeDone() bool { } return cnt == replicas } + +func (r *InstanceSet) IsInInitializing() bool { + return r.Status.InitReplicas == nil || *r.Status.InitReplicas != ptr.Deref(r.Status.ReadyInitReplicas, 0) +} diff --git a/apis/workloads/v1/zz_generated.deepcopy.go b/apis/workloads/v1/zz_generated.deepcopy.go index fd68bf1ac85..7257932ccc2 100644 --- a/apis/workloads/v1/zz_generated.deepcopy.go +++ b/apis/workloads/v1/zz_generated.deepcopy.go @@ -345,6 +345,16 @@ func (in *InstanceSetStatus) DeepCopyInto(out *InstanceSetStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.InitReplicas != nil { + in, out := &in.InitReplicas, &out.InitReplicas + *out = new(int32) + **out = **in + } + if in.ReadyInitReplicas != nil { + in, out := &in.ReadyInitReplicas, &out.ReadyInitReplicas + *out = new(int32) + **out = **in + } if in.InstanceStatus != nil { in, out := &in.InstanceStatus, &out.InstanceStatus *out = make([]InstanceStatus, len(*in)) @@ -453,6 +463,16 @@ func (in *InstanceStatus) DeepCopyInto(out *InstanceStatus) { *out = make([]InstanceConfigStatus, len(*in)) copy(*out, *in) } + if in.DataLoaded != nil { + in, out := &in.DataLoaded, &out.DataLoaded + *out = new(bool) + **out = **in + } + if in.MemberJoined != nil { + in, out := &in.MemberJoined, &out.MemberJoined + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus. 
@@ -475,6 +495,16 @@ func (in *InstanceStatus2) DeepCopyInto(out *InstanceStatus2) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DataLoaded != nil { + in, out := &in.DataLoaded, &out.DataLoaded + *out = new(bool) + **out = **in + } + if in.MemberJoined != nil { + in, out := &in.MemberJoined, &out.MemberJoined + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus2. @@ -588,6 +618,21 @@ func (in *LifecycleActions) DeepCopyInto(out *LifecycleActions) { *out = new(appsv1.Action) (*in).DeepCopyInto(*out) } + if in.MemberJoin != nil { + in, out := &in.MemberJoin, &out.MemberJoin + *out = new(appsv1.Action) + (*in).DeepCopyInto(*out) + } + if in.MemberLeave != nil { + in, out := &in.MemberLeave, &out.MemberLeave + *out = new(appsv1.Action) + (*in).DeepCopyInto(*out) + } + if in.DataLoad != nil { + in, out := &in.DataLoad, &out.DataLoad + *out = new(appsv1.Action) + (*in).DeepCopyInto(*out) + } if in.Reconfigure != nil { in, out := &in.Reconfigure, &out.Reconfigure *out = new(appsv1.Action) diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 5f9e4f8dc54..923ad569af7 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -479,7 +479,7 @@ func main() { } if err = (&component.ComponentReconciler{ - Client: client, + Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("component-controller"), }).SetupWithManager(mgr); err != nil { diff --git a/config/crd/bases/workloads.kubeblocks.io_instances.yaml b/config/crd/bases/workloads.kubeblocks.io_instances.yaml index 2256ea93804..cd77bd89100 100644 --- a/config/crd/bases/workloads.kubeblocks.io_instances.yaml +++ b/config/crd/bases/workloads.kubeblocks.io_instances.yaml @@ -1095,6 +1095,1293 @@ spec: description: Defines a set of hooks that customize the behavior of an Instance throughout its lifecycle. 
properties: + dataLoad: + description: Defines the procedure for importing data into a replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. + properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. + This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. 
If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. 
+ + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. 
+ type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". + enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. 
+ - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. + + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. 
+ format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object + memberJoin: + description: Defines the procedure to add a new replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. + properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. 
+ This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. 
+ type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. + type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". 
+ enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. 
+ + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. + format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object + memberLeave: + description: Defines the procedure to remove a replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. 
+ properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. + This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. 
+ + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. 
+ type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". + enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. 
+ - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. + + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. 
+ format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object reconfigure: description: Defines the procedure that update a replica with new configuration. @@ -9956,12 +11243,24 @@ spec: description: currentRevision, if not empty, indicates the version of the Instance used to generate pod. type: string + dataLoaded: + description: Represents whether the instance data is loaded. + type: boolean + inVolumeExpansion: + description: Represents whether the instance is in volume expansion. + type: boolean + memberJoined: + description: Represents whether the instance is joined the cluster. + type: boolean observedGeneration: description: |- observedGeneration is the most recent generation observed for this InstanceSet. It corresponds to the InstanceSet's generation, which is updated on mutation by the API Server. format: int64 type: integer + provisioned: + description: Represents whether the instance is provisioned. + type: boolean ready: description: Represents whether the instance is in ready condition. 
type: boolean @@ -9975,9 +11274,6 @@ spec: description: updateRevision, if not empty, indicates the version of the Instance used to generate pod. type: string - volumeExpansion: - description: Represents whether the instance is in volume expansion. - type: boolean type: object type: object served: true diff --git a/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml b/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml index 3d52c5542a7..3b8e23aca2d 100644 --- a/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml +++ b/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml @@ -2564,6 +2564,1293 @@ spec: description: Defines a set of hooks that customize the behavior of an Instance throughout its lifecycle. properties: + dataLoad: + description: Defines the procedure for importing data into a replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. + properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. + This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. 
+ + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. 
+ type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. + type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". 
+ enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. 
+ + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. + format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object + memberJoin: + description: Defines the procedure to add a new replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. 
+ properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. + This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. 
+ + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. 
+ type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". + enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. 
+ - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. + + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. 
+ format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object + memberLeave: + description: Defines the procedure to remove a replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. + properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. 
+ This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. 
+ type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. + type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". 
+ enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. 
+ + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. + format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object reconfigure: description: Defines the procedure that update a replica with new configuration. 
@@ -11721,16 +13008,25 @@ spec: - name type: object type: array + dataLoaded: + description: Represents whether the instance data is loaded. + type: boolean + inVolumeExpansion: + description: Represents whether the instance is in volume expansion. + type: boolean + memberJoined: + description: Represents whether the instance is joined the cluster. + type: boolean podName: default: Unknown description: Represents the name of the pod. type: string + provisioned: + description: Represents whether the instance is provisioned. + type: boolean role: description: Represents the role of the instance observed. type: string - volumeExpansion: - description: Represents whether the instance is in volume expansion. - type: boolean required: - podName type: object diff --git a/controllers/apps/component/component_controller_test.go b/controllers/apps/component/component_controller_test.go index a487fadd4b5..1cff9819ad1 100644 --- a/controllers/apps/component/component_controller_test.go +++ b/controllers/apps/component/component_controller_test.go @@ -21,8 +21,8 @@ package component import ( "fmt" + "slices" "strconv" - "strings" "time" . 
"github.com/onsi/ginkgo/v2" @@ -30,10 +30,8 @@ import ( "github.com/sethvargo/go-password/password" "golang.org/x/exp/maps" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -49,23 +47,16 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/component" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" "github.com/apecloud/kubeblocks/pkg/generics" - kbacli "github.com/apecloud/kubeblocks/pkg/kbagent/client" testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" testk8s "github.com/apecloud/kubeblocks/pkg/testutil/k8s" viper "github.com/apecloud/kubeblocks/pkg/viperx" ) -const ( - podAnnotationKey4Test = "component-replicas-test" -) - var _ = Describe("Component Controller", func() { const ( compDefName = "test-compdef" compVerName = "test-compver" clusterName = "test-cluster" - leader = "leader" - follower = "follower" defaultCompName = "default" ) @@ -107,11 +98,8 @@ var _ = Describe("Component Controller", func() { testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ServiceAccountSignature, true, inNS) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.RoleSignature, true, inNS) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.RoleBindingSignature, true, inNS) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ConfigMapSignature, true, inNS, ml) // non-namespaced - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.StorageClassSignature, true, ml) resetTestContext() } @@ -208,23 +196,6 @@ var _ = Describe("Component Controller", func() { 
})).Should(Equal(kbappsv1.RunningComponentPhase)) } - stableCompObservedGeneration := func(compKey types.NamespacedName, waitFor *time.Duration) (int64, *kbappsv1.Component) { - sleepTime := 300 * time.Millisecond - if waitFor != nil { - sleepTime = *waitFor - } - time.Sleep(sleepTime) - comp := &kbappsv1.Component{} - Expect(testCtx.Cli.Get(testCtx.Ctx, compKey, comp)).Should(Succeed()) - return comp.Status.ObservedGeneration, comp - } - - changeCompReplicas := func(compKey types.NamespacedName, replicas int32) { - Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) { - comp.Spec.Replicas = replicas - })()).ShouldNot(HaveOccurred()) - } - testChangeReplicas := func(compName, compDefName string) { compDefKey := client.ObjectKeyFromObject(compDefObj) Eventually(testapps.GetAndChangeObj(&testCtx, compDefKey, func(compDef *kbappsv1.ComponentDefinition) { @@ -235,15 +206,18 @@ var _ = Describe("Component Controller", func() { expectedOG := int64(1) for _, replicas := range []int32{5, 3, 1, 2, 4} { By(fmt.Sprintf("change replicas to %d", replicas)) - changeCompReplicas(compKey, replicas) + Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) { + comp.Spec.Replicas = replicas + })()).ShouldNot(HaveOccurred()) expectedOG++ - By("checking component status and the number of replicas changed") + By("checking the component status") Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) { g.Expect(comp.Status.ObservedGeneration).To(BeEquivalentTo(expectedOG)) g.Eventually(testapps.GetComponentPhase(&testCtx, compKey)).Should(BeElementOf(kbappsv1.CreatingComponentPhase, kbappsv1.UpdatingComponentPhase)) })).Should(Succeed()) + By("checking the number of replicas in ITS as expected") itsKey := compKey Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { g.Expect(int(*its.Spec.Replicas)).To(BeEquivalentTo(replicas)) @@ -265,12 +239,9 @@ var _ = 
Describe("Component Controller", func() { }) By(fmt.Sprintf("change replicas to %d", target)) - changeCompReplicas(compKey, target) - - By("checking the number of replicas in component as expected") - Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) { - g.Expect(comp.Spec.Replicas).Should(Equal(target)) - })).Should(Succeed()) + Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) { + comp.Spec.Replicas = target + })()).ShouldNot(HaveOccurred()) By("checking the component status can't be reconciled well") Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) { @@ -279,29 +250,25 @@ var _ = Describe("Component Controller", func() { By("checking the number of replicas in ITS unchanged") itsKey := compKey - Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + Consistently(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { g.Expect(*its.Spec.Replicas).Should(Equal(init)) })).Should(Succeed()) } - changeReplicasLimit := func(compDefName string, minReplicas, maxReplicas int32) { - By(fmt.Sprintf("set replicas limit to [%d, %d]", minReplicas, maxReplicas)) - compDefKey := types.NamespacedName{Name: compDefName} - Eventually(testapps.GetAndChangeObj(&testCtx, compDefKey, func(compDef *kbappsv1.ComponentDefinition) { - compDef.Spec.ReplicasLimit = &kbappsv1.ReplicasLimit{ - MinReplicas: minReplicas, - MaxReplicas: maxReplicas, - } - })).Should(Succeed()) - } - testChangeReplicasToZeroWithReplicasLimit := func(compName, compDefName string) { var ( init = int32(3) target = int32(0) ) - changeReplicasLimit(compDefName, 0, 16384) + By(fmt.Sprintf("set replicas limit to [%d, %d]", 0, 16384)) + compDefKey := types.NamespacedName{Name: compDefName} + Eventually(testapps.GetAndChangeObj(&testCtx, compDefKey, func(compDef *kbappsv1.ComponentDefinition) { + compDef.Spec.ReplicasLimit = &kbappsv1.ReplicasLimit{ + 
MinReplicas: 0, + MaxReplicas: 16384, + } + })).Should(Succeed()) createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) { f.SetReplicas(init). @@ -311,11 +278,12 @@ var _ = Describe("Component Controller", func() { }) By(fmt.Sprintf("change replicas to %d", target)) - changeCompReplicas(compKey, target) + Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) { + comp.Spec.Replicas = target + })()).ShouldNot(HaveOccurred()) - By("checking the number of replicas in component as expected") + By("checking the component status") Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) { - g.Expect(comp.Spec.Replicas).Should(Equal(target)) g.Expect(comp.Generation).Should(Equal(comp.Status.ObservedGeneration)) })).Should(Succeed()) @@ -326,231 +294,92 @@ var _ = Describe("Component Controller", func() { })).Should(Succeed()) } - getPVCName := func(vctName, compName string, i int) string { - return fmt.Sprintf("%s-%s-%s-%d", vctName, clusterKey.Name, compName, i) - } + testChangeReplicasWithDataAction := func(compName, compDefName string) { + By("update cmpd to enable data actions") + Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(compDefObj), func(cmpd *kbappsv1.ComponentDefinition) { + cmpd.Spec.LifecycleActions.DataDump = testapps.NewLifecycleAction("data-dump") + cmpd.Spec.LifecycleActions.DataLoad = testapps.NewLifecycleAction("data-load") + })()).Should(Succeed()) - createPVC := func(clusterName, pvcName, compName, storageSize, storageClassName string) { - if storageSize == "" { - storageSize = "1Gi" - } - testapps.NewPersistentVolumeClaimFactory(testCtx.DefaultNamespace, pvcName, clusterName, - compName, testapps.DataVolumeName). - AddLabelsInMap(map[string]string{ - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: compName, - constant.AppManagedByLabelKey: constant.AppName, - }). - SetStorage(storageSize). 
- SetStorageClass(storageClassName). - CheckedCreate(&testCtx) - } + var ( + initReplicas = int32(1) + targetReplicas = int32(3) + ) - mockComponentPVCsAndBound := func(comp *kbappsv1.Component, compName string, replicas int, create bool, storageClassName string) { - for i := 0; i < replicas; i++ { - for _, vct := range comp.Spec.VolumeClaimTemplates { - pvcKey := types.NamespacedName{ - Namespace: clusterKey.Namespace, - Name: getPVCName(vct.Name, compName, i), - } - if create { - createPVC(clusterKey.Name, pvcKey.Name, compName, vct.Spec.Resources.Requests.Storage().String(), storageClassName) - } - Eventually(testapps.CheckObjExists(&testCtx, pvcKey, - &corev1.PersistentVolumeClaim{}, true)).Should(Succeed()) - Eventually(testapps.GetAndChangeObjStatus(&testCtx, pvcKey, func(pvc *corev1.PersistentVolumeClaim) { - pvc.Status.Phase = corev1.ClaimBound - if pvc.Status.Capacity == nil { - pvc.Status.Capacity = corev1.ResourceList{} - } - pvc.Status.Capacity[corev1.ResourceStorage] = pvc.Spec.Resources.Requests[corev1.ResourceStorage] - })).Should(Succeed()) - } - } - } + createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) { + f.SetReplicas(initReplicas) + }) - mockPodsForTest := func(clusterName, compName, compDefName string, number int) []*corev1.Pod { - itsName := clusterName + "-" + compName - pods := make([]*corev1.Pod, 0) - for i := 0; i < number; i++ { - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: itsName + "-" + strconv.Itoa(i), - Namespace: testCtx.DefaultNamespace, - Labels: map[string]string{ - constant.AppManagedByLabelKey: constant.AppName, - constant.AppNameLabelKey: compDefName, - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: compName, - appsv1.ControllerRevisionHashLabelKey: "mock-version", - }, - Annotations: map[string]string{ - podAnnotationKey4Test: fmt.Sprintf("%d", number), + By("mock ITS ready") + itsKey := compKey + Expect(testapps.GetAndChangeObjStatus(&testCtx, itsKey, 
func(its *workloads.InstanceSet) { + pods := []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", compKey.Name, 0), + Labels: map[string]string{ + constant.RoleLabelKey: "leader", + }, }, }, - Spec: corev1.PodSpec{ + } + testk8s.MockInstanceSetReady(its, pods...) + })()).ShouldNot(HaveOccurred()) + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.IsInstanceSetReady()).Should(BeTrue()) + })).Should(Succeed()) + + uid := strconv.FormatInt(compObj.Generation, 10) + source := &lifecycleReplica{ + synthesizedComp: &component.SynthesizedComponent{ + Namespace: compObj.Namespace, + FullCompName: compObj.Name, + PodSpec: &corev1.PodSpec{ Containers: []corev1.Container{ - { - Name: "mock-container", - Image: "mock-image", - }, testapps.MockKBAgentContainer(), }, }, - } - pods = append(pods, pod) - } - return pods - } - - horizontalScaleComp := func(updatedReplicas int, comp *kbappsv1.Component, compName, storageClassName string) { - By("Mocking component PVCs to bound") - mockComponentPVCsAndBound(comp, compName, int(comp.Spec.Replicas), true, storageClassName) - - By("Checking its replicas right") - itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, compName) - Expect(int(*itsList.Items[0].Spec.Replicas)).To(BeEquivalentTo(comp.Spec.Replicas)) - - By("Creating mock pods in InstanceSet") - pods := mockPodsForTest(clusterKey.Name, compName, comp.Spec.CompDef, int(comp.Spec.Replicas)) - for i := range pods { - if i == 0 { - pods[i].Labels[constant.RoleLabelKey] = leader - } else { - pods[i].Labels[constant.RoleLabelKey] = follower - } - pods[i].Status.Conditions = []corev1.PodCondition{{ - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }} - Expect(testCtx.CheckedCreateObj(testCtx.Ctx, pods[i])).Should(Succeed()) - } - Expect(testapps.ChangeObjStatus(&testCtx, &itsList.Items[0], func() { - testk8s.MockInstanceSetReady(&itsList.Items[0], pods...) 
- })).ShouldNot(HaveOccurred()) - - By("Waiting for the component enter Running phase") - Eventually(testapps.GetComponentPhase(&testCtx, compKey)).Should(Equal(kbappsv1.RunningComponentPhase)) - - By(fmt.Sprintf("Changing replicas to %d", updatedReplicas)) - changeCompReplicas(compKey, int32(updatedReplicas)) - - checkUpdatedItsReplicas := func() { - By("Checking updated its replicas") - Eventually(func() int32 { - itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, compName) - return *itsList.Items[0].Spec.Replicas - }).Should(BeEquivalentTo(updatedReplicas)) - } - - scaleOutCheck := func() { - if comp.Spec.Replicas == 0 { - return - } - - By("Mock PVCs and set status to bound") - mockComponentPVCsAndBound(comp, compName, updatedReplicas, true, storageClassName) - - checkUpdatedItsReplicas() - - By("Checking updated its replicas' PVC and size") - for _, vct := range comp.Spec.VolumeClaimTemplates { - var volumeQuantity resource.Quantity - for i := 0; i < updatedReplicas; i++ { - pvcKey := types.NamespacedName{ - Namespace: clusterKey.Namespace, - Name: getPVCName(vct.Name, compName, i), - } - Eventually(testapps.CheckObj(&testCtx, pvcKey, func(g Gomega, pvc *corev1.PersistentVolumeClaim) { - if volumeQuantity.IsZero() { - volumeQuantity = pvc.Spec.Resources.Requests[corev1.ResourceStorage] - } - Expect(pvc.Spec.Resources.Requests[corev1.ResourceStorage]).To(Equal(volumeQuantity)) - Expect(pvc.Status.Capacity[corev1.ResourceStorage]).To(Equal(volumeQuantity)) - })).Should(Succeed()) - } - } + }, + instance: workloads.InstanceStatus{ + PodName: fmt.Sprintf("%s-0", compKey.Name), + }, } - - scaleInCheck := func() { - checkUpdatedItsReplicas() - - By("Checking pod's annotation should be updated consistently") - Eventually(func(g Gomega) { - podList := corev1.PodList{} - g.Expect(k8sClient.List(testCtx.Ctx, &podList, client.MatchingLabels{ - constant.AppInstanceLabelKey: clusterKey.Name, - constant.KBAppComponentLabelKey: compName, - 
})).Should(Succeed()) - for _, pod := range podList.Items { - ss := strings.Split(pod.Name, "-") - ordinal, _ := strconv.Atoi(ss[len(ss)-1]) - if ordinal >= updatedReplicas { - continue - } - // The annotation was updated by the mocked member leave action. - g.Expect(pod.Annotations[podAnnotationKey4Test]).Should(Equal(fmt.Sprintf("%d", updatedReplicas))) - } - }).Should(Succeed()) + replicas := make([]string, 0) + for i := initReplicas; i < targetReplicas; i++ { + replicas = append(replicas, fmt.Sprintf("%s-%d", compKey.Name, i)) } + slices.Sort(replicas) + parameters, err := component.NewReplicaTask(compKey.Name, uid, source, replicas) + Expect(err).NotTo(HaveOccurred()) - if int(comp.Spec.Replicas) < updatedReplicas { - scaleOutCheck() - } - if int(comp.Spec.Replicas) > updatedReplicas { - scaleInCheck() + By("check data replication task parameters") + envCMKey := types.NamespacedName{ + Namespace: compKey.Namespace, + Name: constant.GetCompEnvCMName(compKey.Name), } - } - - horizontalScale := func(updatedReplicas int, storageClassName, compName string, compDefNames ...string) { - defer kbacli.UnsetMockClient() - - initialGeneration, comp := stableCompObservedGeneration(compKey, nil) - - By("mock all component PVCs to bound") - mockComponentPVCsAndBound(comp, compName, int(comp.Spec.Replicas), true, storageClassName) - - By("mock kb-agent for h-scale") - testapps.MockKBAgentClient4HScale(&testCtx, clusterKey, compName, podAnnotationKey4Test, updatedReplicas) - - By(fmt.Sprintf("h-scale component %s", compName)) - horizontalScaleComp(updatedReplicas, comp, compName, storageClassName) - - By("check component status and the number of replicas changed") - Eventually(testapps.GetComponentObservedGeneration(&testCtx, compKey)).Should(BeEquivalentTo(int(initialGeneration) + 1)) - } - - testHorizontalScale := func(compName, compDefName string, initialReplicas, updatedReplicas int32) { - By("creating a component with VolumeClaimTemplate") - pvcSpec := 
testapps.NewPVCSpec("1Gi") - createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) { - f.SetReplicas(initialReplicas). - AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec). - AddVolumeClaimTemplate(testapps.LogVolumeName, pvcSpec) - if updatedReplicas == 0 { - f.SetPVCRetentionPolicy(&kbappsv1.PersistentVolumeClaimRetentionPolicy{ - WhenScaled: kbappsv1.RetainPersistentVolumeClaimRetentionPolicyType, - }) + Consistently(testapps.CheckObj(&testCtx, envCMKey, func(g Gomega, cm *corev1.ConfigMap) { + for key := range parameters { + g.Expect(cm.Data).ShouldNot(HaveKey(key)) } - }) - horizontalScale(int(updatedReplicas), testk8s.DefaultStorageClassName, compName, compDefName) - } - - testHorizontalScaleWithDataActions := func(compName, compDefName string, initialReplicas, updatedReplicas int32) { - By("update cmpd to enable data actions") - Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(compDefObj), func(cmpd *kbappsv1.ComponentDefinition) { - cmpd.Spec.LifecycleActions.DataDump = testapps.NewLifecycleAction("data-dump") - cmpd.Spec.LifecycleActions.DataLoad = testapps.NewLifecycleAction("data-load") - })()).Should(Succeed()) + })).Should(Succeed()) - By("creating a component with VolumeClaimTemplate") - pvcSpec := testapps.NewPVCSpec("1Gi") - createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) { - f.SetReplicas(initialReplicas). 
- AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec) - }) + By(fmt.Sprintf("change replicas to %d", targetReplicas)) + generation := compObj.Generation + Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) { + generation = comp.Generation + 1 + comp.Spec.Replicas = targetReplicas + })()).ShouldNot(HaveOccurred()) - horizontalScale(int(updatedReplicas), testk8s.DefaultStorageClassName, compName, compDefName) + By("check data replication task parameters") + uid = strconv.FormatInt(generation, 10) + parameters, err = component.NewReplicaTask(compKey.Name, uid, source, replicas) + Expect(err).NotTo(HaveOccurred()) + Eventually(testapps.CheckObj(&testCtx, envCMKey, func(g Gomega, cm *corev1.ConfigMap) { + for key, val := range parameters { + g.Expect(cm.Data).Should(HaveKey(key)) + g.Expect(cm.Data[key]).Should(Equal(val)) + } + })).Should(Succeed()) } testVolumeExpansion := func(compName, compDefName string) { @@ -1712,7 +1541,7 @@ var _ = Describe("Component Controller", func() { cleanEnv() }) - It("should create/delete pods to match the desired replica number", func() { + It("change replicas", func() { testChangeReplicas(defaultCompName, compDefObj.Name) }) @@ -1724,43 +1553,19 @@ var _ = Describe("Component Controller", func() { testChangeReplicasToZeroWithReplicasLimit(defaultCompName, compDefObj.Name) }) - It("scale-out from 1 to 3", func() { - testHorizontalScale(defaultCompName, compDefObj.Name, 1, 3) - }) - - It("scale-in from 3 to 1", func() { - testHorizontalScale(defaultCompName, compDefObj.Name, 3, 1) - }) - - It("scale-in to 0 and PVCs should not been deleted", func() { - changeReplicasLimit(compDefObj.Name, 0, 16384) - - testHorizontalScale(defaultCompName, compDefObj.Name, 3, 0) - }) - - It("h-scale with data actions", func() { - testHorizontalScaleWithDataActions(defaultCompName, compDefObj.Name, 1, 2) + It("scale-out with data action", func() { + testChangeReplicasWithDataAction(defaultCompName, compDefObj.Name) }) 
}) Context("volume expansion", func() { - var ( - mockStorageClass *storagev1.StorageClass - ) - BeforeEach(func() { createDefinitionObjects() - mockStorageClass = testk8s.CreateMockStorageClass(&testCtx, testk8s.DefaultStorageClassName) }) It("should update PVC request storage size accordingly", func() { testVolumeExpansion(defaultCompName, compDefObj.Name) }) - - It("scale-out", func() { - testVolumeExpansion(defaultCompName, compDefObj.Name) - horizontalScale(5, mockStorageClass.Name, defaultCompName, compDefObj.Name) - }) }) Context("start & stop", func() { diff --git a/controllers/apps/component/transformer_component_account_provision.go b/controllers/apps/component/transformer_component_account_provision.go index c80144aa1e5..6b4d78649ec 100644 --- a/controllers/apps/component/transformer_component_account_provision.go +++ b/controllers/apps/component/transformer_component_account_provision.go @@ -35,7 +35,6 @@ import ( appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" "github.com/apecloud/kubeblocks/pkg/common" "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" @@ -93,7 +92,7 @@ func (t *componentAccountProvisionTransformer) Transform(ctx graph.TransformCont return nil } - lfa, err2 := t.lifecycleAction(transCtx) + lfa, err2 := newLifecycleAction("account-provision", transCtx.SynthesizeComponent, transCtx.RunningWorkload) if err2 != nil { return err2 } @@ -134,21 +133,6 @@ func (t *componentAccountProvisionTransformer) Transform(ctx graph.TransformCont return err3 } -func (t *componentAccountProvisionTransformer) lifecycleAction(transCtx *componentTransformContext) (lifecycle.Lifecycle, error) { - synthesizedComp := transCtx.SynthesizeComponent - pods, err := component.ListOwnedPods(transCtx.Context, transCtx.Client, - 
synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name) - if err != nil { - return nil, err - } - lfa, err := lifecycle.New(synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name, - synthesizedComp.LifecycleActions, synthesizedComp.TemplateVars, nil, pods...) - if err != nil { - return nil, err - } - return lfa, nil -} - func (t *componentAccountProvisionTransformer) createAccount(transCtx *componentTransformContext, lfa lifecycle.Lifecycle, cond *metav1.Condition, account synthesizedSystemAccount, secret *corev1.Secret) error { var ( diff --git a/controllers/apps/component/transformer_component_post_provision.go b/controllers/apps/component/transformer_component_post_provision.go index df78c6bb7e4..2ba70b5b9e0 100644 --- a/controllers/apps/component/transformer_component_post_provision.go +++ b/controllers/apps/component/transformer_component_post_provision.go @@ -24,7 +24,6 @@ import ( "fmt" "time" - "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" "github.com/apecloud/kubeblocks/pkg/controller/model" @@ -83,28 +82,13 @@ func (t *componentPostProvisionTransformer) markPostProvisionDone(transCtx *comp } func (t *componentPostProvisionTransformer) postProvision(transCtx *componentTransformContext) error { - lfa, err := t.lifecycleAction4Component(transCtx) + lfa, err := newLifecycleAction("post-provision", transCtx.SynthesizeComponent, transCtx.RunningWorkload) if err != nil { return err } return lfa.PostProvision(transCtx.Context, transCtx.Client, nil) } -func (t *componentPostProvisionTransformer) lifecycleAction4Component(transCtx *componentTransformContext) (lifecycle.Lifecycle, error) { - synthesizedComp := transCtx.SynthesizeComponent - pods, err := component.ListOwnedPods(transCtx.Context, transCtx.Client, - synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name) - if err != 
nil { - return nil, err - } - if len(pods) == 0 { - // TODO: (good-first-issue) we should handle the case that the component has no pods - return nil, fmt.Errorf("has no pods to running the post-provision action") - } - return lifecycle.New(synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name, - synthesizedComp.LifecycleActions, synthesizedComp.TemplateVars, nil, pods...) -} - func checkPostProvisionDone(transCtx *componentTransformContext) bool { synthesizedComp := transCtx.SynthesizeComponent if synthesizedComp == nil || synthesizedComp.LifecycleActions == nil || synthesizedComp.LifecycleActions.PostProvision == nil { diff --git a/controllers/apps/component/transformer_component_post_provision_test.go b/controllers/apps/component/transformer_component_post_provision_test.go index 2084789c534..c7fe4321d1a 100644 --- a/controllers/apps/component/transformer_component_post_provision_test.go +++ b/controllers/apps/component/transformer_component_post_provision_test.go @@ -27,13 +27,13 @@ import ( . 
"github.com/onsi/gomega" "github.com/golang/mock/gomock" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" appsutil "github.com/apecloud/kubeblocks/controllers/apps/util" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/component" @@ -117,6 +117,7 @@ var _ = Describe("post-provision transformer test", func() { Component: comp, ComponentOrig: comp.DeepCopy(), SynthesizeComponent: synthesizeComponent, + RunningWorkload: &workloads.InstanceSet{}, } }) @@ -137,17 +138,11 @@ var _ = Describe("post-provision transformer test", func() { }).AnyTimes() }) - reader.Objects = append(reader.Objects, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testCtx.DefaultNamespace, - Name: fmt.Sprintf("%s-0", constant.GenerateWorkloadNamePattern(clusterName, compName)), - Labels: map[string]string{ - constant.AppManagedByLabelKey: constant.AppName, - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: compName, - }, + transCtx.RunningWorkload.(*workloads.InstanceSet).Status.InstanceStatus = []workloads.InstanceStatus{ + { + PodName: fmt.Sprintf("%s-0", constant.GenerateWorkloadNamePattern(clusterName, compName)), }, - }) + } }) It("ok", func() { @@ -178,7 +173,7 @@ var _ = Describe("post-provision transformer test", func() { transformer := &componentPostProvisionTransformer{} err := transformer.Transform(transCtx, dag) Expect(err).ShouldNot(BeNil()) - Expect(err.Error()).Should(ContainSubstring("has no pods to running the post-provision action")) + Expect(err.Error()).Should(ContainSubstring("has no pods to calling the post-provision action")) }) }) }) diff --git a/controllers/apps/component/transformer_component_pre_terminate.go 
b/controllers/apps/component/transformer_component_pre_terminate.go index da0d858941b..52a991321af 100644 --- a/controllers/apps/component/transformer_component_pre_terminate.go +++ b/controllers/apps/component/transformer_component_pre_terminate.go @@ -107,13 +107,12 @@ func (t *componentPreTerminateTransformer) provisioned(transCtx *componentTransf return false, client.IgnoreNotFound(err) } - provisioned, err := component.GetReplicasStatusFunc(its, func(s component.ReplicaStatus) bool { - return s.Provisioned - }) - if err != nil { - return false, err + for _, inst := range its.Status.InstanceStatus { + if inst.Provisioned { + return true, nil + } } - return len(provisioned) > 0, nil + return false, nil } func (t *componentPreTerminateTransformer) checkPreTerminateDone(transCtx *componentTransformContext, dag *graph.DAG) bool { @@ -145,29 +144,27 @@ func (t *componentPreTerminateTransformer) markPreTerminateDone(transCtx *compon } func (t *componentPreTerminateTransformer) preTerminate(transCtx *componentTransformContext, compDef *appsv1.ComponentDefinition) error { - lfa, err := t.lifecycleAction4Component(transCtx, compDef) + lfa, err := t.newLifecycleAction(transCtx, compDef) if err != nil { return err } return lfa.PreTerminate(transCtx.Context, transCtx.Client, nil) } -func (t *componentPreTerminateTransformer) lifecycleAction4Component(transCtx *componentTransformContext, compDef *appsv1.ComponentDefinition) (lifecycle.Lifecycle, error) { - synthesizedComp, err1 := t.synthesizedComponent(transCtx, compDef) - if err1 != nil { - return nil, err1 +func (t *componentPreTerminateTransformer) newLifecycleAction(transCtx *componentTransformContext, compDef *appsv1.ComponentDefinition) (lifecycle.Lifecycle, error) { + synthesizedComp, err := t.synthesizedComponent(transCtx, compDef) + if err != nil { + return nil, err } - pods, err2 := component.ListOwnedPods(transCtx.Context, transCtx.Client, - synthesizedComp.Namespace, synthesizedComp.ClusterName, 
synthesizedComp.Name) - if err2 != nil { - return nil, err2 + itsKey := types.NamespacedName{ + Namespace: synthesizedComp.Namespace, + Name: synthesizedComp.FullCompName, } - if len(pods) == 0 { - // TODO: (good-first-issue) we should handle the case that the component has no pods - return nil, fmt.Errorf("has no pods to running the pre-terminate action") + its := &workloads.InstanceSet{} + if err = transCtx.Client.Get(transCtx.Context, itsKey, its); err != nil { + return nil, client.IgnoreNotFound(err) } - return lifecycle.New(synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name, - synthesizedComp.LifecycleActions, synthesizedComp.TemplateVars, nil, pods...) + return newLifecycleAction("pre-terminate", synthesizedComp, its) } func (t *componentPreTerminateTransformer) synthesizedComponent(transCtx *componentTransformContext, compDef *appsv1.ComponentDefinition) (*component.SynthesizedComponent, error) { diff --git a/controllers/apps/component/transformer_component_pre_terminate_test.go b/controllers/apps/component/transformer_component_pre_terminate_test.go index dad9a3c3e8d..8d41b66052f 100644 --- a/controllers/apps/component/transformer_component_pre_terminate_test.go +++ b/controllers/apps/component/transformer_component_pre_terminate_test.go @@ -27,7 +27,6 @@ import ( . 
"github.com/onsi/gomega" "github.com/golang/mock/gomock" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/utils/ptr" @@ -37,7 +36,6 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" appsutil "github.com/apecloud/kubeblocks/controllers/apps/util" "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/model" kbacli "github.com/apecloud/kubeblocks/pkg/kbagent/client" @@ -65,10 +63,12 @@ var _ = Describe("pre-terminate transformer test", func() { } provisioned := func(its *workloads.InstanceSet) { - replicas := []string{ - fmt.Sprintf("%s-0", its.Name), + its.Status.InstanceStatus = []workloads.InstanceStatus{ + { + PodName: fmt.Sprintf("%s-0", its.Name), + Provisioned: true, + }, } - Expect(component.StatusReplicasStatus(its, replicas, false, false)).Should(Succeed()) } BeforeEach(func() { @@ -153,19 +153,6 @@ var _ = Describe("pre-terminate transformer test", func() { }).AnyTimes() }) - // mock pods to run the pre-terminate action - reader.Objects = append(reader.Objects, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testCtx.DefaultNamespace, - Name: fmt.Sprintf("%s-0", constant.GenerateWorkloadNamePattern(clusterName, compName)), - Labels: map[string]string{ - constant.AppManagedByLabelKey: constant.AppName, - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: compName, - }, - }, - }) - transformer := &componentPreTerminateTransformer{} err := transformer.Transform(transCtx, dag) Expect(err).ShouldNot(BeNil()) @@ -173,12 +160,12 @@ var _ = Describe("pre-terminate transformer test", func() { Expect(preTerminated).Should(BeTrue()) }) - It("no pods error", func() { - transformer := &componentPreTerminateTransformer{} - err := transformer.Transform(transCtx, dag) - 
Expect(err).ShouldNot(BeNil()) - Expect(err.Error()).Should(ContainSubstring("has no pods to running the pre-terminate action")) - }) + // It("no pods error", func() { + // transformer := &componentPreTerminateTransformer{} + // err := transformer.Transform(transCtx, dag) + // Expect(err).ShouldNot(BeNil()) + // Expect(err.Error()).Should(ContainSubstring("has no pods to calling the pre-terminate action")) + // }) It("not-defined", func() { compDef := reader.Objects[0].(*appsv1.ComponentDefinition) @@ -199,12 +186,7 @@ var _ = Describe("pre-terminate transformer test", func() { It("not provisioned", func() { its := reader.Objects[1].(*workloads.InstanceSet) - Expect(component.UpdateReplicasStatusFunc(its, func(r *component.ReplicasStatus) error { - for i := range r.Status { - r.Status[i].Provisioned = false - } - return nil - })).Should(Succeed()) + its.Status.InstanceStatus[0].Provisioned = false transformer := &componentPreTerminateTransformer{} err := transformer.Transform(transCtx, dag) diff --git a/controllers/apps/component/transformer_component_status.go b/controllers/apps/component/transformer_component_status.go index fb5163f7516..4bc0cac398b 100644 --- a/controllers/apps/component/transformer_component_status.go +++ b/controllers/apps/component/transformer_component_status.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" @@ -130,18 +131,15 @@ func (t *componentStatusTransformer) reconcileStatus(transCtx *componentTransfor // check if the component has failed pod hasFailedPod, messages := t.hasFailedPod() - // check if the component scale out failed - hasRunningScaleOut, hasFailedScaleOut, err := t.hasScaleOutRunning(transCtx) - if err != nil { - return err - } + // check if the component has scale out running + hasRunningScaleOut := 
t.hasScaleOutRunning() // check if the volume expansion is running hasRunningVolumeExpansion := t.hasVolumeExpansionRunning() // check if the component has failure hasFailure := func() bool { - return hasFailedPod || hasFailedScaleOut + return hasFailedPod }() // check if the component is in creating phase @@ -219,35 +217,19 @@ func (t *componentStatusTransformer) isInstanceSetRunning() bool { return t.runningITS.IsInstanceSetReady() } -// hasScaleOutRunning checks if the scale out is running. -func (t *componentStatusTransformer) hasScaleOutRunning(transCtx *componentTransformContext) (running bool, failed bool, err error) { +func (t *componentStatusTransformer) hasScaleOutRunning() bool { if t.runningITS == nil || t.runningITS.Spec.Replicas == nil { - return false, false, nil + return false } - - replicas, err := component.GetReplicasStatusFunc(t.protoITS, func(status component.ReplicaStatus) bool { - return status.DataLoaded != nil && !*status.DataLoaded || - status.MemberJoined != nil && !*status.MemberJoined + return slices.ContainsFunc(t.runningITS.Status.InstanceStatus, func(inst workloads.InstanceStatus) bool { + return !ptr.Deref(inst.DataLoaded, true) || !ptr.Deref(inst.MemberJoined, true) }) - if err != nil { - return false, false, err - } - if len(replicas) == 0 { - return false, false, nil - } - - // TODO: scale-out failed - - return true, false, nil } func (t *componentStatusTransformer) hasVolumeExpansionRunning() bool { - for _, inst := range t.runningITS.Status.InstanceStatus { - if inst.VolumeExpansion { - return true - } - } - return false + return slices.ContainsFunc(t.runningITS.Status.InstanceStatus, func(inst workloads.InstanceStatus) bool { + return inst.InVolumeExpansion + }) } // hasFailedPod checks if the instance set has failed pod. 
diff --git a/controllers/apps/component/transformer_component_workload.go b/controllers/apps/component/transformer_component_workload.go index 0b4ae18fd0d..7b6c91584fa 100644 --- a/controllers/apps/component/transformer_component_workload.go +++ b/controllers/apps/component/transformer_component_workload.go @@ -28,7 +28,6 @@ import ( "golang.org/x/exp/maps" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -116,10 +115,6 @@ func (t *componentWorkloadTransformer) reconcileWorkload(ctx context.Context, cl t.buildInstanceSetPlacementAnnotation(comp, protoITS) - if err := t.reconcileReplicasStatus(ctx, cli, synthesizedComp, runningITS, protoITS); err != nil { - return err - } - return nil } @@ -135,43 +130,6 @@ func (t *componentWorkloadTransformer) buildInstanceSetPlacementAnnotation(comp } } -func (t *componentWorkloadTransformer) reconcileReplicasStatus(ctx context.Context, cli client.Reader, - synthesizedComp *component.SynthesizedComponent, runningITS, protoITS *workloads.InstanceSet) error { - var ( - namespace = synthesizedComp.Namespace - clusterName = synthesizedComp.ClusterName - compName = synthesizedComp.Name - ) - - // HACK: sync replicas status from runningITS to protoITS - component.BuildReplicasStatus(runningITS, protoITS) - - replicas, err := func() ([]string, error) { - pods, err := component.ListOwnedPods(ctx, cli, namespace, clusterName, compName) - if err != nil { - return nil, err - } - podNameSet := sets.New[string]() - for _, pod := range pods { - podNameSet.Insert(pod.Name) - } - - desiredPodNames, err := component.GeneratePodNamesByITS(protoITS) - if err != nil { - return nil, err - } - desiredPodNameSet := sets.New(desiredPodNames...) 
- - return desiredPodNameSet.Intersection(podNameSet).UnsortedList(), nil - }() - if err != nil { - return err - } - - hasMemberJoinDefined, hasDataActionDefined := hasMemberJoinNDataActionDefined(synthesizedComp.LifecycleActions) - return component.StatusReplicasStatus(protoITS, replicas, hasMemberJoinDefined, hasDataActionDefined) -} - func (t *componentWorkloadTransformer) handleUpdate(transCtx *componentTransformContext, cli model.GraphClient, dag *graph.DAG, synthesizedComp *component.SynthesizedComponent, comp *appsv1.Component, runningITS, protoITS *workloads.InstanceSet) error { start, stop, err := t.handleWorkloadStartNStop(synthesizedComp, runningITS, &protoITS) diff --git a/controllers/apps/component/transformer_component_workload_ops.go b/controllers/apps/component/transformer_component_workload_ops.go index 00c95c383c9..0d7e2e5baf8 100644 --- a/controllers/apps/component/transformer_component_workload_ops.go +++ b/controllers/apps/component/transformer_component_workload_ops.go @@ -21,7 +21,6 @@ package component import ( "crypto/sha256" - "errors" "fmt" "path/filepath" "reflect" @@ -42,7 +41,6 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" "github.com/apecloud/kubeblocks/pkg/controller/model" - intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) type componentWorkloadOps struct { @@ -52,9 +50,7 @@ type componentWorkloadOps struct { synthesizeComp *component.SynthesizedComponent dag *graph.DAG - // runningITS is a snapshot of the InstanceSet that is already running - runningITS *workloads.InstanceSet - // protoITS is the InstanceSet object that is rebuilt from scratch during each reconcile process + runningITS *workloads.InstanceSet protoITS *workloads.InstanceSet desiredCompPodNameSet sets.Set[string] runningItsPodNameSet sets.Set[string] @@ -95,185 +91,65 @@ func (r *componentWorkloadOps) horizontalScale() error { in = 
r.runningItsPodNameSet.Difference(r.desiredCompPodNameSet) out = r.desiredCompPodNameSet.Difference(r.runningItsPodNameSet) ) - if in.Len() == 0 && out.Len() == 0 { - return r.postHorizontalScale() // TODO: how about consecutive horizontal scales? - } - - if in.Len() > 0 { - if err := r.scaleIn(); err != nil { - return err - } - } - - if out.Len() > 0 { - if err := r.scaleOut(); err != nil { - return err - } - } - - r.transCtx.EventRecorder.Eventf(r.component, - corev1.EventTypeNormal, - "HorizontalScale", - "start horizontal scale component %s of cluster %s from %d to %d", - r.synthesizeComp.Name, r.synthesizeComp.ClusterName, int(*r.runningITS.Spec.Replicas), r.synthesizeComp.Replicas) - - return nil -} - -func (r *componentWorkloadOps) scaleIn() error { - if r.synthesizeComp.Replicas == 0 && len(r.synthesizeComp.VolumeClaimTemplates) > 0 { - if r.synthesizeComp.PVCRetentionPolicy.WhenScaled != appsv1.RetainPersistentVolumeClaimRetentionPolicyType { - return fmt.Errorf("when intending to scale-in to 0, only the \"Retain\" option is supported for the PVC retention policy") - } - } - - deleteReplicas := r.runningItsPodNameSet.Difference(r.desiredCompPodNameSet).UnsortedList() - joinedReplicas := make([]string, 0) - err := component.DeleteReplicasStatus(r.protoITS, deleteReplicas, func(s component.ReplicaStatus) { - // has no member join defined or has joined successfully - if s.Provisioned && (s.MemberJoined == nil || *s.MemberJoined) { - joinedReplicas = append(joinedReplicas, s.Name) - } - }) - if err != nil { + if err := r.dataReplicationTask(); err != nil { return err } - - // TODO: check the component definition to determine whether we need to call leave member before deleting replicas. 
- if err := r.leaveMember4ScaleIn(deleteReplicas, joinedReplicas); err != nil { - r.transCtx.Logger.Error(err, "leave member at scale-in error") - return err + if in.Len() != 0 || out.Len() != 0 { + r.transCtx.EventRecorder.Eventf(r.component, + corev1.EventTypeNormal, + "HorizontalScale", + "start horizontal scale component %s of cluster %s from %d to %d", + r.synthesizeComp.Name, r.synthesizeComp.ClusterName, int(*r.runningITS.Spec.Replicas), r.synthesizeComp.Replicas) } return nil } -func (r *componentWorkloadOps) leaveMember4ScaleIn(deleteReplicas, joinedReplicas []string) error { - pods, err := component.ListOwnedPods(r.transCtx.Context, r.cli, - r.synthesizeComp.Namespace, r.synthesizeComp.ClusterName, r.synthesizeComp.Name) - if err != nil { - return err - } - - deleteReplicasSet := sets.New(deleteReplicas...) - joinedReplicasSet := sets.New(joinedReplicas...) - hasMemberLeaveDefined := r.synthesizeComp.LifecycleActions != nil && r.synthesizeComp.LifecycleActions.MemberLeave != nil - r.transCtx.Logger.Info("leave member at scaling-in", "delete replicas", deleteReplicas, - "joined replicas", joinedReplicas, "has member-leave action defined", hasMemberLeaveDefined) - - leaveErrors := make([]error, 0) - for _, pod := range pods { - if deleteReplicasSet.Has(pod.Name) { - if joinedReplicasSet.Has(pod.Name) { // else: hasn't joined yet, no need to leave - if err = r.leaveMemberForPod(pod, pods); err != nil { - leaveErrors = append(leaveErrors, err) - } - joinedReplicasSet.Delete(pod.Name) - } - deleteReplicasSet.Delete(pod.Name) - } - } - - if hasMemberLeaveDefined && len(joinedReplicasSet) > 0 { - leaveErrors = append(leaveErrors, - fmt.Errorf("some replicas have joined but not leaved since the Pod object is not exist: %v", sets.List(joinedReplicasSet))) - } - if len(leaveErrors) > 0 { - return intctrlutil.NewRequeueError(time.Second, fmt.Sprintf("%v", leaveErrors)) +func (r *componentWorkloadOps) dataReplicationTask() error { + _, hasDataActionDefined := 
hasMemberJoinNDataActionDefined(r.synthesizeComp.LifecycleActions) + if !hasDataActionDefined { + return nil } - return nil -} -func (r *componentWorkloadOps) leaveMemberForPod(pod *corev1.Pod, pods []*corev1.Pod) error { var ( - synthesizedComp = r.synthesizeComp - lifecycleActions = synthesizedComp.LifecycleActions + // new replicas to be submitted to InstanceSet + newReplicas = r.desiredCompPodNameSet.Difference(r.runningItsPodNameSet).UnsortedList() + // replicas can be used as the source replica to dump data + sourceReplicas = sets.New[string]() + // replicas are in provisioning and the data has not been loaded + provisioningReplicas []string + // replicas are not provisioned + unprovisionedReplicas = r.runningItsPodNameSet.Clone() ) - - switchover := func(lfa lifecycle.Lifecycle, pod *corev1.Pod) error { - if lifecycleActions.Switchover == nil { - return nil + for _, replica := range r.runningITS.Status.InstanceStatus { + if !r.runningItsPodNameSet.Has(replica.PodName) { + continue // to be deleted } - err := lfa.Switchover(r.transCtx.Context, r.cli, nil, "") - if err == nil { - r.transCtx.Logger.Info("succeed to call switchover action", "pod", pod.Name) - } else if !errors.Is(err, lifecycle.ErrActionNotDefined) { - r.transCtx.Logger.Info("failed to call switchover action, ignore it", "pod", pod.Name, "error", err) + if replica.Provisioned { + unprovisionedReplicas.Delete(replica.PodName) } - return nil - } - - leaveMember := func(lfa lifecycle.Lifecycle, pod *corev1.Pod) error { - if lifecycleActions.MemberLeave == nil { - return nil + if replica.DataLoaded != nil && !*replica.DataLoaded { + provisioningReplicas = append(provisioningReplicas, replica.PodName) + continue } - err := lfa.MemberLeave(r.transCtx.Context, r.cli, nil) - if err != nil { - if errors.Is(err, lifecycle.ErrActionNotDefined) { - return nil - } - return err + if replica.MemberJoined == nil || *replica.MemberJoined { + sourceReplicas.Insert(replica.PodName) } - 
r.transCtx.Logger.Info("succeed to call leave member action", "pod", pod.Name) - return nil - } - - if lifecycleActions == nil || (lifecycleActions.Switchover == nil && lifecycleActions.MemberLeave == nil) { - return nil - } - - lfa, err := lifecycle.New(synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name, - lifecycleActions, synthesizedComp.TemplateVars, pod, pods...) - if err != nil { - return err - } - - if err = switchover(lfa, pod); err != nil { - return err - } - if err = leaveMember(lfa, pod); err != nil { - return err - } - return nil -} - -func (r *componentWorkloadOps) scaleOut() error { - if err := r.buildDataReplicationTask(); err != nil { - return err - } - - // replicas to be created - newReplicas := r.desiredCompPodNameSet.Difference(r.runningItsPodNameSet).UnsortedList() - hasMemberJoinDefined, hasDataActionDefined := hasMemberJoinNDataActionDefined(r.synthesizeComp.LifecycleActions) - return component.NewReplicasStatus(r.protoITS, newReplicas, hasMemberJoinDefined, hasDataActionDefined) -} - -func (r *componentWorkloadOps) buildDataReplicationTask() error { - _, hasDataActionDefined := hasMemberJoinNDataActionDefined(r.synthesizeComp.LifecycleActions) - if !hasDataActionDefined { - return nil } - // replicas to be provisioned - newReplicas := r.desiredCompPodNameSet.Difference(r.runningItsPodNameSet).UnsortedList() - // replicas in provisioning that the data has not been loaded - provisioningReplicas, err := component.GetReplicasStatusFunc(r.protoITS, func(s component.ReplicaStatus) bool { - return s.DataLoaded != nil && !*s.DataLoaded - }) - if err != nil { - return err - } - - if len(newReplicas) == 0 && len(provisioningReplicas) == 0 { + if r.runningITS.IsInInitializing() || len(newReplicas) == 0 && unprovisionedReplicas.Len() == 0 && len(provisioningReplicas) == 0 { return nil } - // the source replica - source, err := r.sourceReplica(r.synthesizeComp.LifecycleActions.DataDump, provisioningReplicas) + // choose the 
source replica + source, err := r.sourceReplica(r.synthesizeComp.LifecycleActions.DataDump, sourceReplicas) if err != nil { return err } - replicas := append(slices.Clone(newReplicas), provisioningReplicas...) + replicas := slices.Clone(newReplicas) + replicas = append(replicas, unprovisionedReplicas.UnsortedList()...) + replicas = append(replicas, provisioningReplicas...) + slices.Sort(replicas) parameters, err := component.NewReplicaTask(r.synthesizeComp.FullCompName, r.synthesizeComp.Generation, source, replicas) if err != nil { return err @@ -288,118 +164,33 @@ func (r *componentWorkloadOps) buildDataReplicationTask() error { return createOrUpdateEnvConfigMap(transCtx, r.dag, nil, parameters) } -func (r *componentWorkloadOps) sourceReplica(dataDump *appsv1.Action, provisioningReplicas []string) (*corev1.Pod, error) { - pods, err := component.ListOwnedPods(r.transCtx.Context, r.cli, - r.synthesizeComp.Namespace, r.synthesizeComp.ClusterName, r.synthesizeComp.Name) - if err != nil { - return nil, err - } - if len(provisioningReplicas) > 0 { - // exclude provisioning replicas - pods = slices.DeleteFunc(pods, func(pod *corev1.Pod) bool { - return slices.Contains(provisioningReplicas, pod.Name) - }) +func (r *componentWorkloadOps) sourceReplica(dataDump *appsv1.Action, sourceReplicas sets.Set[string]) (lifecycle.Replica, error) { + var replicas []lifecycle.Replica + for i, inst := range r.runningITS.Status.InstanceStatus { + if sourceReplicas.Has(inst.PodName) { + replicas = append(replicas, &lifecycleReplica{ + synthesizedComp: r.synthesizeComp, + instance: r.runningITS.Status.InstanceStatus[i], + }) + } } - if len(pods) > 0 { + if len(replicas) > 0 { if len(dataDump.TargetPodSelector) == 0 && (dataDump.Exec == nil || len(dataDump.Exec.TargetPodSelector) == 0) { dataDump.TargetPodSelector = appsv1.AnyReplica } // TODO: idempotence for provisioning replicas - pods, err = lifecycle.SelectTargetPods(pods, nil, dataDump) + var err error + replicas, err = 
lifecycle.SelectTargetPods(replicas, nil, dataDump) if err != nil { return nil, err } - if len(pods) > 0 { - return pods[0], nil + if len(replicas) > 0 { + return replicas[0], nil } } return nil, fmt.Errorf("no available pod to dump data") } -func (r *componentWorkloadOps) postHorizontalScale() error { - if err := r.postScaleOut(); err != nil { - return err - } - return nil -} - -func (r *componentWorkloadOps) postScaleOut() error { - if err := r.buildDataReplicationTask(); err != nil { - return err - } - if err := r.joinMember4ScaleOut(); err != nil { - return err - } - return nil -} - -func (r *componentWorkloadOps) joinMember4ScaleOut() error { - pods, err := component.ListOwnedPods(r.transCtx.Context, r.cli, - r.synthesizeComp.Namespace, r.synthesizeComp.ClusterName, r.synthesizeComp.Name) - if err != nil { - return err - } - - joinErrors := make([]error, 0) - if err = component.UpdateReplicasStatusFunc(r.protoITS, func(replicas *component.ReplicasStatus) error { - for _, pod := range pods { - i := slices.IndexFunc(replicas.Status, func(r component.ReplicaStatus) bool { - return r.Name == pod.Name - }) - if i < 0 { - continue // the pod is not in the replicas status? - } - - status := replicas.Status[i] - if status.MemberJoined == nil || *status.MemberJoined { - continue // no need to join or already joined - } - - // TODO: should wait for the data to be loaded before joining the member? 
- - if err := r.joinMemberForPod(pod, pods); err != nil { - joinErrors = append(joinErrors, fmt.Errorf("pod %s: %w", pod.Name, err)) - } else { - replicas.Status[i].MemberJoined = ptr.To(true) - } - } - - notJoinedReplicas := make([]string, 0) - for _, r := range replicas.Status { - if r.MemberJoined != nil && !*r.MemberJoined { - notJoinedReplicas = append(notJoinedReplicas, r.Name) - } - } - if len(notJoinedReplicas) > 0 { - joinErrors = append(joinErrors, fmt.Errorf("some replicas have not joined: %v", notJoinedReplicas)) - } - return nil - }); err != nil { - return err - } - - if len(joinErrors) > 0 { - return intctrlutil.NewRequeueError(time.Second, fmt.Sprintf("%v", joinErrors)) - } - return nil -} - -func (r *componentWorkloadOps) joinMemberForPod(pod *corev1.Pod, pods []*corev1.Pod) error { - synthesizedComp := r.synthesizeComp - lfa, err := lifecycle.New(synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name, - synthesizedComp.LifecycleActions, synthesizedComp.TemplateVars, pod, pods...) 
- if err != nil { - return err - } - if err = lfa.MemberJoin(r.transCtx.Context, r.cli, nil); err != nil { - if !errors.Is(err, lifecycle.ErrActionNotDefined) { - return err - } - } - r.transCtx.Logger.Info("succeed to join member for pod", "pod", pod.Name) - return nil -} - func (r *componentWorkloadOps) reconfigure() error { runningObjs, protoObjs, err := prepareFileTemplateObjects(r.transCtx) if err != nil { diff --git a/controllers/apps/component/transformer_component_workload_test.go b/controllers/apps/component/transformer_component_workload_test.go deleted file mode 100644 index 649a2047624..00000000000 --- a/controllers/apps/component/transformer_component_workload_test.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright (C) 2022-2025 ApeCloud Co., Ltd -This file is part of KubeBlocks project -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package component - -import ( - "context" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/golang/mock/gomock" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" - appsutil "github.com/apecloud/kubeblocks/controllers/apps/util" - "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/controller/component" - "github.com/apecloud/kubeblocks/pkg/controller/graph" - "github.com/apecloud/kubeblocks/pkg/controller/model" - kbacli "github.com/apecloud/kubeblocks/pkg/kbagent/client" - kbagentproto "github.com/apecloud/kubeblocks/pkg/kbagent/proto" - testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" -) - -var _ = Describe("Component Workload Operations Test", func() { - const ( - clusterName = "test-cluster" - compName = "test-comp" - kubeblocksName = "kubeblocks" - ) - - var ( - reader *appsutil.MockReader - dag *graph.DAG - comp *appsv1.Component - synthesizeComp *component.SynthesizedComponent - ) - - roles := []appsv1.ReplicaRole{ - {Name: "leader", UpdatePriority: 3}, - {Name: "follower", UpdatePriority: 2}, - } - - newDAG := func(graphCli model.GraphClient, comp *appsv1.Component) *graph.DAG { - d := graph.NewDAG() - graphCli.Root(d, comp, comp, model.ActionStatusPtr()) - return d - } - - BeforeEach(func() { - reader = &appsutil.MockReader{} - comp = &appsv1.Component{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testCtx.DefaultNamespace, - Name: constant.GenerateClusterComponentName(clusterName, compName), - Labels: map[string]string{ - constant.AppManagedByLabelKey: constant.AppName, - constant.AppInstanceLabelKey: clusterName, - constant.KBAppComponentLabelKey: compName, - }, - }, - Spec: appsv1.ComponentSpec{}, - } - - synthesizeComp = &component.SynthesizedComponent{ - Namespace: testCtx.DefaultNamespace, - ClusterName: clusterName, - Name: compName, - Roles: roles, - LifecycleActions: &appsv1.ComponentLifecycleActions{ - MemberJoin: &appsv1.Action{ - Exec: &appsv1.ExecAction{ - Image: 
"test-image", - }, - }, - MemberLeave: &appsv1.Action{ - Exec: &appsv1.ExecAction{ - Image: "test-image", - }, - }, - Switchover: &appsv1.Action{ - Exec: &appsv1.ExecAction{ - Image: "test-image", - }, - }, - }, - } - - graphCli := model.NewGraphClient(reader) - dag = newDAG(graphCli, comp) - }) - - Context("Member Leave Operations", func() { - var ( - ops *componentWorkloadOps - pod0 *corev1.Pod - pod1 *corev1.Pod - pods []*corev1.Pod - ) - - BeforeEach(func() { - pod0 = testapps.NewPodFactory(testCtx.DefaultNamespace, "test-pod-0"). - AddContainer(corev1.Container{ - Image: "test-image", - Name: "test-container", - }). - AddLabels( - constant.AppManagedByLabelKey, kubeblocksName, - constant.AppInstanceLabelKey, clusterName, - constant.KBAppComponentLabelKey, compName, - ). - GetObject() - - pod1 = testapps.NewPodFactory(testCtx.DefaultNamespace, "test-pod-1"). - AddContainer(corev1.Container{ - Image: "test-image", - Name: "test-container", - }). - AddLabels( - constant.AppManagedByLabelKey, kubeblocksName, - constant.AppInstanceLabelKey, clusterName, - constant.KBAppComponentLabelKey, compName, - ). - GetObject() - - pods = []*corev1.Pod{pod0, pod1} - - container := corev1.Container{ - Name: "mock-container-name", - Image: testapps.ApeCloudMySQLImage, - ImagePullPolicy: corev1.PullIfNotPresent, - } - - mockITS := testapps.NewInstanceSetFactory(testCtx.DefaultNamespace, - "test-its", clusterName, compName). - AddFinalizers([]string{constant.DBClusterFinalizerName}). - AddContainer(container). - AddAppInstanceLabel(clusterName). - AddAppComponentLabel(compName). - AddAppManagedByLabel(). - SetReplicas(2). - SetRoles(roles). 
- GetObject() - - ops = &componentWorkloadOps{ - transCtx: &componentTransformContext{ - Context: ctx, - Logger: logger, - EventRecorder: clusterRecorder, - }, - cli: k8sClient, - component: comp, - synthesizeComp: synthesizeComp, - runningITS: mockITS, - protoITS: mockITS.DeepCopy(), - dag: dag, - } - }) - - It("should handle switchover for when scale in", func() { - testapps.MockKBAgentClient(func(recorder *kbacli.MockClientMockRecorder) { - recorder.Action(gomock.Any(), gomock.Any()).Times(2).DoAndReturn(func(ctx context.Context, req kbagentproto.ActionRequest) (kbagentproto.ActionResponse, error) { - GinkgoWriter.Printf("ActionRequest: %#v\n", req) - switch req.Action { - case "switchover": - Expect(req.Parameters["KB_SWITCHOVER_CURRENT_NAME"]).Should(Equal(pod1.Name)) - case "memberLeave": - Expect(req.Parameters["KB_LEAVE_MEMBER_POD_NAME"]).Should(Equal(pod1.Name)) - } - rsp := kbagentproto.ActionResponse{Message: "mock success"} - return rsp, nil - }) - }) - - By("setting up leader pod") - pod1.Labels[constant.RoleLabelKey] = "follower" - pod1.Labels[constant.RoleLabelKey] = "leader" - - By("executing leave member for leader") - Expect(ops.leaveMemberForPod(pod1, pods)).Should(Succeed()) - }) - }) -}) diff --git a/controllers/apps/component/utils.go b/controllers/apps/component/utils.go index 7d85e972f67..aeac01940fc 100644 --- a/controllers/apps/component/utils.go +++ b/controllers/apps/component/utils.go @@ -22,12 +22,18 @@ package component import ( "fmt" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" + "github.com/apecloud/kubeblocks/pkg/controller/component" + "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" + 
"github.com/apecloud/kubeblocks/pkg/kbagent" ) const ( @@ -95,3 +101,59 @@ func isCompDeleting(comp *appsv1.Component) bool { } return comp.Spec.TerminationPolicy != appsv1.DoNotTerminate } + +func newLifecycleAction(action string, synthesizedComp *component.SynthesizedComponent, obj client.Object) (lifecycle.Lifecycle, error) { + if obj == nil { + return nil, fmt.Errorf("the workload obj is nil to calling the %s action", action) + } + its := obj.(*workloads.InstanceSet) + if len(its.Status.InstanceStatus) == 0 { + // TODO: (good-first-issue) we should handle the case that the component has no pods + return nil, fmt.Errorf("has no pods to calling the %s action", action) + } + var replicas []lifecycle.Replica + for i := range its.Status.InstanceStatus { + replicas = append(replicas, &lifecycleReplica{ + synthesizedComp: synthesizedComp, + instance: its.Status.InstanceStatus[i], + }) + } + return lifecycle.New(synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name, + synthesizedComp.LifecycleActions, synthesizedComp.TemplateVars, nil, replicas...) +} + +type lifecycleReplica struct { + synthesizedComp *component.SynthesizedComponent + instance workloads.InstanceStatus +} + +func (r *lifecycleReplica) Namespace() string { + return r.synthesizedComp.Namespace +} + +func (r *lifecycleReplica) Name() string { + return r.instance.PodName +} + +func (r *lifecycleReplica) Role() string { + return r.instance.Role +} + +func (r *lifecycleReplica) Endpoint() (string, int32, error) { + host := intctrlutil.PodFQDN(r.synthesizedComp.Namespace, r.synthesizedComp.FullCompName, r.instance.PodName) + pod := corev1.Pod{ + Spec: *r.synthesizedComp.PodSpec, // TODO: ports for the host-network have been written back to the pod spec? 
+ } + port, err := intctrlutil.GetPortByName(pod, kbagent.ContainerName, kbagent.DefaultHTTPPortName) + return host, port, err +} + +func (r *lifecycleReplica) StreamingEndpoint() (string, int32, error) { + // TODO: should use a component service + host := intctrlutil.PodFQDN(r.synthesizedComp.Namespace, r.synthesizedComp.FullCompName, r.instance.PodName) + pod := corev1.Pod{ + Spec: *r.synthesizedComp.PodSpec, // TODO: ports for the host-network have been written back to the pod spec? + } + port, err := intctrlutil.GetPortByName(pod, kbagent.ContainerName, kbagent.DefaultStreamingPortName) + return host, port, err +} diff --git a/controllers/workloads/instance_controller.go b/controllers/workloads/instance_controller.go index 5bedf18264e..b16870e91aa 100644 --- a/controllers/workloads/instance_controller.go +++ b/controllers/workloads/instance_controller.go @@ -94,6 +94,7 @@ func (r *InstanceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c Do(instance.NewFixMetaReconciler()). Do(instance.NewDeletionReconciler()). Do(instance.NewRevisionUpdateReconciler()). + Do(instance.NewMembershipReconciler()). Do(instance.NewStatusReconciler()). // Do(instance.NewRevisionUpdateReconciler()). Do(instance.NewAssistantObjectReconciler()). 
diff --git a/controllers/workloads/instance_controller_test.go b/controllers/workloads/instance_controller_test.go index 2c28a0660d7..f0e99d8c44e 100644 --- a/controllers/workloads/instance_controller_test.go +++ b/controllers/workloads/instance_controller_test.go @@ -498,18 +498,158 @@ var _ = Describe("Instance Controller", func() { // It("reconfigure", func() { // // TODO // }) - // - // It("member join", func() { - // // TODO - // }) - // - // It("member leave", func() { - // // TODO - // }) - // - // It("data load (source ref)", func() { - // // TODO - // }) + }) + + Context("membership", func() { + var ( + memberJoin = false + memberLeave = false + memberJoinError error + memberLeaveError error + ) + + BeforeEach(func() { + testapps.MockKBAgentClient(func(recorder *kbacli.MockClientMockRecorder) { + recorder.Action(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, req kbagentproto.ActionRequest) (kbagentproto.ActionResponse, error) { + rsp := kbagentproto.ActionResponse{} + switch req.Action { + case "memberJoin": + if memberJoinError != nil { + return rsp, memberJoinError + } + memberJoin = true + case "memberLeave": + if memberLeaveError != nil { + return rsp, memberLeaveError + } + memberLeave = true + } + return rsp, nil + }).AnyTimes() + }) + }) + + AfterEach(func() { + kbacli.UnsetMockClient() + memberJoin = false + memberLeave = false + memberJoinError = nil + memberLeaveError = nil + }) + + setup := func(withMemberAction bool) { + createInstObj(instName, func(f *testapps.MockInstanceFactory) { + if withMemberAction { + f.SetLifecycleActions(&workloads.LifecycleActions{ + MemberJoin: testapps.NewLifecycleAction("member-join"), + MemberLeave: testapps.NewLifecycleAction("member-leave"), + }) + } + }) + + mockPodReady(instObj.Namespace, instObj.Name) + podKey := instKey + podObj := &corev1.Pod{} + Expect(k8sClient.Get(ctx, podKey, podObj)).Should(Succeed()) + } + + It("create - w/o member join", func() { + setup(false) + + By("check 
instance status") + Eventually(testapps.CheckObj(&testCtx, instKey, func(g Gomega, inst *workloads.Instance) { + g.Expect(inst.Status.Provisioned).Should(BeTrue()) + g.Expect(inst.Status.MemberJoined).Should(BeNil()) + })).Should(Succeed()) + + By("check member join action NOT be triggered") + Consistently(memberJoin).Should(BeFalse()) + }) + + PIt("create - w/ member join", func() { + setup(true) + + By("check instance status") + Eventually(testapps.CheckObj(&testCtx, instKey, func(g Gomega, inst *workloads.Instance) { + g.Expect(inst.Status.Provisioned).Should(BeTrue()) + g.Expect(inst.Status.MemberJoined).ShouldNot(BeNil()) + g.Expect(*inst.Status.MemberJoined).Should(BeTrue()) + })).Should(Succeed()) + + By("check member join action be triggered") + Eventually(memberJoin).Should(BeTrue()) + }) + + It("delete w/o member leave", func() { + setup(false) + + By("check instance status") + Eventually(testapps.CheckObj(&testCtx, instKey, func(g Gomega, inst *workloads.Instance) { + g.Expect(inst.Status.Provisioned).Should(BeTrue()) + g.Expect(inst.Status.MemberJoined).Should(BeNil()) + })).Should(Succeed()) + + By("check member join action NOT be triggered") + Consistently(memberJoin).Should(BeFalse()) + + By("delete instance") + Expect(k8sClient.Delete(ctx, instObj)).Should(Succeed()) + + By("wait for instance to be deleted") + Eventually(testapps.CheckObjExists(&testCtx, instKey, &workloads.Instance{}, false)).Should(Succeed()) + + By("check member leave action NOT be triggered") + Consistently(memberLeave).Should(BeFalse()) + }) + + PIt("delete w/ member leave - joined replicas", func() { + setup(true) + + By("check instance status") + Eventually(testapps.CheckObj(&testCtx, instKey, func(g Gomega, inst *workloads.Instance) { + g.Expect(inst.Status.Provisioned).Should(BeTrue()) + g.Expect(inst.Status.MemberJoined).ShouldNot(BeNil()) + g.Expect(*inst.Status.MemberJoined).Should(BeTrue()) + })).Should(Succeed()) + + By("check member join action be triggered") + 
Eventually(memberJoin).Should(BeTrue()) + + By("delete instance") + Expect(k8sClient.Delete(ctx, instObj)).Should(Succeed()) + + By("wait for instance to be deleted") + Eventually(testapps.CheckObjExists(&testCtx, instKey, &workloads.Instance{}, false)).Should(Succeed()) + + By("check member leave action be triggered") + Eventually(memberLeave).Should(BeTrue()) + }) + + PIt("delete w/ member leave - unjoined replicas", func() { + By("mock member-join action error") + memberJoinError = fmt.Errorf("mock member-join action error") + + setup(true) + + By("check instance status") + Eventually(testapps.CheckObj(&testCtx, instKey, func(g Gomega, inst *workloads.Instance) { + g.Expect(inst.Status.Provisioned).Should(BeTrue()) + g.Expect(inst.Status.MemberJoined).ShouldNot(BeNil()) + g.Expect(*inst.Status.MemberJoined).Should(BeFalse()) + })).Should(Succeed()) + + By("check member join action NOT be triggered") + Consistently(memberJoin).Should(BeFalse()) + + By("delete instance") + Expect(k8sClient.Delete(ctx, instObj)).Should(Succeed()) + + By("wait for instance to be deleted") + Eventually(testapps.CheckObjExists(&testCtx, instKey, &workloads.Instance{}, false)).Should(Succeed()) + + By("check member leave action NOT be triggered") + Consistently(memberLeave).Should(BeFalse()) + }) }) }) @@ -545,6 +685,13 @@ func mockPodReady(namespace, podName string) { mockPodStatusReady(namespace, podName, metav1.Now()) } +func mockPodsReady(namespace string, podNames ...string) { + By(fmt.Sprintf("mock pods ready: %s", strings.Join(podNames, ","))) + for _, podName := range podNames { + mockPodStatusReady(namespace, podName, metav1.Now()) + } +} + func mockPodReadyNAvailable(namespace, podName string, minReadySeconds int32) { By(fmt.Sprintf("mock pod ready & available: %s", podName)) mockPodStatusReady(namespace, podName, metav1.NewTime(time.Now().Add(time.Duration(-1*(minReadySeconds+1))*time.Second))) @@ -561,3 +708,17 @@ func mockPodReadyNAvailableWithRole(namespace, podName, role 
string, minReadySec pod.Labels[constant.RoleLabelKey] = role })()).Should(Succeed()) } + +func mockPodsReadyNAvailableWithRole(namespace, role string, minReadySeconds int32, podNames ...string) { + By(fmt.Sprintf("mock pods ready & available with role: %s, %s", strings.Join(podNames, ","), role)) + for _, podName := range podNames { + mockPodStatusReady(namespace, podName, metav1.NewTime(time.Now().Add(time.Duration(-1*(minReadySeconds+1))*time.Second))) + podKey := types.NamespacedName{ + Namespace: namespace, + Name: podName, + } + Eventually(testapps.GetAndChangeObj(&testCtx, podKey, func(pod *corev1.Pod) { + pod.Labels[constant.RoleLabelKey] = role + })()).Should(Succeed()) + } +} diff --git a/controllers/workloads/instanceset_controller.go b/controllers/workloads/instanceset_controller.go index 019ab3a9ab8..92ca8e78f2e 100644 --- a/controllers/workloads/instanceset_controller.go +++ b/controllers/workloads/instanceset_controller.go @@ -83,10 +83,11 @@ func (r *InstanceSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) Do(instanceset.NewFixMetaReconciler()). Do(instanceset.NewDeletionReconciler()). Do(instanceset.NewValidationReconciler()). + Do(instanceset.NewMembershipReconciler()). Do(instanceset.NewStatusReconciler()). Do(instanceset.NewRevisionUpdateReconciler()). Do(instanceset.NewAssistantObjectReconciler()). - Do(instanceset.NewReplicasAlignmentReconciler()). + Do(instanceset.NewInstanceAlignmentReconciler()). Do(instanceset.NewUpdateReconciler()). 
Commit() diff --git a/controllers/workloads/instanceset_controller_2.go b/controllers/workloads/instanceset_controller2.go similarity index 98% rename from controllers/workloads/instanceset_controller_2.go rename to controllers/workloads/instanceset_controller2.go index 5dfae30bc95..06756b94dcc 100644 --- a/controllers/workloads/instanceset_controller_2.go +++ b/controllers/workloads/instanceset_controller2.go @@ -58,7 +58,7 @@ func (r *InstanceSetReconciler2) Reconcile(ctx context.Context, req ctrl.Request Do(instanceset2.NewValidationReconciler()). Do(instanceset2.NewStatusReconciler()). Do(instanceset2.NewRevisionUpdateReconciler()). - Do(instanceset2.NewAssistantObjectReconciler()). + Do(instanceset2.NewHeadlessServiceReconciler()). Do(instanceset2.NewAlignmentReconciler()). Do(instanceset2.NewUpdateReconciler()). Commit() diff --git a/controllers/workloads/instanceset_controller_2_test.go b/controllers/workloads/instanceset_controller2_test.go similarity index 51% rename from controllers/workloads/instanceset_controller_2_test.go rename to controllers/workloads/instanceset_controller2_test.go index 6606f4cd612..4b13442e5c5 100644 --- a/controllers/workloads/instanceset_controller_2_test.go +++ b/controllers/workloads/instanceset_controller2_test.go @@ -20,26 +20,32 @@ along with this program. If not, see . package workloads import ( + "context" "fmt" "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/golang/mock/gomock" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/model" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" "github.com/apecloud/kubeblocks/pkg/generics" + kbacli "github.com/apecloud/kubeblocks/pkg/kbagent/client" + kbagentproto "github.com/apecloud/kubeblocks/pkg/kbagent/proto" testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" ) @@ -64,7 +70,10 @@ var _ = Describe("InstanceSet Controller 2", func() { // namespaced testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.InstanceSetSignature, true, inNS, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.InstanceSignature, true, inNS, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ServiceSignature, true, inNS, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ConfigMapSignature, true, inNS, ml) } BeforeEach(func() { @@ -106,16 +115,12 @@ var _ = Describe("InstanceSet Controller 2", func() { return fmt.Sprintf("%s-%d", itsKey.Name, ordinal) } - mockPodsReady := func() { + podNames := func() []string { + podNames := make([]string, 0) for i := int32(0); i < replicas; i++ { - mockPodReady(itsObj.Namespace, podName(i)) - } - } - - mockPodsReadyNAvailableWithRole := func() { - for i 
:= int32(0); i < replicas; i++ { - mockPodReadyNAvailableWithRole(itsObj.Namespace, podName(i), "leader", 0) + podNames = append(podNames, podName(i)) } + return podNames } Context("provision", func() { @@ -134,7 +139,7 @@ var _ = Describe("InstanceSet Controller 2", func() { g.Expect(its.IsInstanceSetReady()).Should(BeFalse()) })).Should(Succeed()) - mockPodsReady() + mockPodsReady(itsObj.Namespace, podNames()...) By("check its ready") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -163,7 +168,7 @@ var _ = Describe("InstanceSet Controller 2", func() { g.Expect(its.IsInstanceSetReady()).Should(BeFalse()) })).Should(Succeed()) - mockPodsReady() + mockPodsReady(itsObj.Namespace, podNames()...) By("check its not ready") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -171,7 +176,7 @@ var _ = Describe("InstanceSet Controller 2", func() { g.Expect(its.IsInstanceSetReady()).Should(BeFalse()) })).Should(Succeed()) - mockPodsReadyNAvailableWithRole() + mockPodsReadyNAvailableWithRole(itsObj.Namespace, "leader", 0, podNames()...) By("check its ready") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -235,7 +240,7 @@ var _ = Describe("InstanceSet Controller 2", func() { }) }) - mockPodsReady() + mockPodsReady(itsObj.Namespace, podNames()...) By("check its ready") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -289,7 +294,7 @@ var _ = Describe("InstanceSet Controller 2", func() { It("scale-in", func() { createITSObj(itsName) - mockPodsReady() + mockPodsReady(itsObj.Namespace, podNames()...) By("check its ready") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -317,7 +322,7 @@ var _ = Describe("InstanceSet Controller 2", func() { }) }) - mockPodsReady() + mockPodsReady(itsObj.Namespace, podNames()...) 
By("check its ready") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -356,7 +361,7 @@ var _ = Describe("InstanceSet Controller 2", func() { }) }) - mockPodsReady() + mockPodsReady(itsObj.Namespace, podNames()...) By("check its ready") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -388,7 +393,7 @@ var _ = Describe("InstanceSet Controller 2", func() { It("scale-out", func() { createITSObj(itsName) - mockPodsReady() + mockPodsReady(itsObj.Namespace, podNames()...) By("check its ready") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -418,4 +423,374 @@ var _ = Describe("InstanceSet Controller 2", func() { })).Should(Succeed()) }) }) + + Context("membership", func() { + var ( + memberJoinReplicas = sets.New[string]() + memberLeaveReplicas = sets.New[string]() + memberJoinError, memberLeaveError error + ) + + BeforeEach(func() { + testapps.MockKBAgentClient(func(recorder *kbacli.MockClientMockRecorder) { + recorder.Action(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, req kbagentproto.ActionRequest) (kbagentproto.ActionResponse, error) { + rsp := kbagentproto.ActionResponse{} + switch req.Action { + case "memberJoin": + if memberJoinError != nil { + return rsp, memberJoinError + } + memberJoinReplicas.Insert(req.Parameters["KB_JOIN_MEMBER_POD_NAME"]) + case "memberLeave": + if memberLeaveError != nil { + return rsp, memberLeaveError + } + memberLeaveReplicas.Insert(req.Parameters["KB_LEAVE_MEMBER_POD_NAME"]) + } + return rsp, nil + }).AnyTimes() + }) + }) + + AfterEach(func() { + kbacli.UnsetMockClient() + memberJoinReplicas.Clear() + memberLeaveReplicas.Clear() + memberJoinError = nil + memberLeaveError = nil + }) + + setup := func(initReplicas int32, withMemberAction bool, processors ...func(factory *testapps.MockInstanceSetFactory)) { + createITSObj(itsName, func(f *testapps.MockInstanceSetFactory) { + 
f.SetReplicas(initReplicas). + SetInstanceUpdateStrategy(&workloads.InstanceUpdateStrategy{ + Type: kbappsv1.RollingUpdateStrategyType, + }) + if withMemberAction { + f.SetLifecycleActions(&kbappsv1.ComponentLifecycleActions{ + MemberJoin: testapps.NewLifecycleAction("member-join"), + MemberLeave: testapps.NewLifecycleAction("member-leave"), + }, nil) + } + }) + + replicas := make([]string, 0) + for i := int32(0); i < initReplicas; i++ { + replicas = append(replicas, podName(i)) + } + mockPodsReady(itsObj.Namespace, replicas...) + + By("check ITS as ready") + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.IsInstanceSetReady()).Should(BeTrue()) + })).Should(Succeed()) + } + + It("provision w/o member join", func() { + var ( + initReplicas = int32(1) + ) + + setup(initReplicas, false) + + By("check instance spec") + for i := int32(0); i < initReplicas; i++ { + instKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Eventually(testapps.CheckObj(&testCtx, instKey, func(g Gomega, inst *workloads.Instance) { + g.Expect(inst.Spec.LifecycleActions).Should(BeNil()) + })).Should(Succeed()) + } + + By("check member join action NOT be triggered") + Consistently(memberJoinReplicas).Should(BeEmpty()) + }) + + It("provision w/ member join", func() { + var ( + initReplicas = int32(1) + ) + + setup(initReplicas, true) + + By("check instance spec") + for i := int32(0); i < initReplicas; i++ { + instKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Eventually(testapps.CheckObj(&testCtx, instKey, func(g Gomega, inst *workloads.Instance) { + g.Expect(inst.Spec.LifecycleActions).ShouldNot(BeNil()) + })).Should(Succeed()) + } + + By("check member join action NOT be triggered") + Consistently(memberJoinReplicas).Should(BeEmpty()) + }) + + It("scale-out w/o member join", func() { + var ( + initReplicas = 
int32(1) + targetReplicas = int32(2) + ) + + setup(initReplicas, false) + + By("scale-out") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas created") + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := int32(0); i < initReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member join action NOT be triggered") + Consistently(memberJoinReplicas).Should(BeEmpty()) + }) + + It("scale-out w/ member join", func() { + var ( + initReplicas = int32(1) + targetReplicas = int32(2) + ) + + setup(initReplicas, true) + + By("scale-out") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas created") + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := int32(0); i < initReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member join action be triggered") + for i := initReplicas; i < targetReplicas; i++ { + 
Eventually(memberJoinReplicas.Has(fmt.Sprintf("%s-%d", itsObj.Name, i))).Should(BeTrue()) + } + }) + + PIt("scale-out w/ member join + data load", func() { + var ( + initReplicas = int32(1) + targetReplicas = int32(2) + ) + + By("mock assistant objects") + assistantObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-env", itsObj.Name), + }, + Data: map[string]string{ + "version": "v1.0.0", + }, + } + Expect(testCtx.CreateObj(testCtx.Ctx, assistantObj)).Should(Succeed()) + gvk, _ := model.GetGVKName(assistantObj) + assistantObjs := []corev1.ObjectReference{ + { + APIVersion: gvk.Version, + Kind: gvk.Kind, + Name: gvk.Name, + }, + } + + setup(initReplicas, true, func(f *testapps.MockInstanceSetFactory) { + f.SetLifecycleActions(&kbappsv1.ComponentLifecycleActions{ + MemberJoin: testapps.NewLifecycleAction("member-join"), + MemberLeave: testapps.NewLifecycleAction("member-leave"), + DataDump: testapps.NewLifecycleAction("data-dump"), + DataLoad: testapps.NewLifecycleAction("data-load"), + }, nil). 
+ SetInstanceAssistantObjects(assistantObjs) + }) + + By("scale-out") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas created") + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := int32(0); i < initReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member join action be triggered") + for i := initReplicas; i < targetReplicas; i++ { + Eventually(memberJoinReplicas.Has(fmt.Sprintf("%s-%d", itsObj.Name, i))).Should(BeTrue()) + } + }) + + It("scale-in w/o member leave", func() { + var ( + initReplicas = int32(1) + targetReplicas = int32(2) + ) + + setup(initReplicas, false) + + By("scale-out first") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas created") + scaledReplicas := make([]string, 0) + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: podName(i), + } + scaledReplicas = append(scaledReplicas, podKey.Name) + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + mockPodsReady(itsObj.Namespace, scaledReplicas...) 
+ + By("check ITS as ready") + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.IsInstanceSetReady()).Should(BeTrue()) + })).Should(Succeed()) + + By("check member join action NOT be triggered") + Consistently(memberJoinReplicas).Should(BeEmpty()) + + By("scale-in") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(initReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas deleted") + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: podName(i), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, false)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := int32(0); i < initReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: podName(i), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member leave action NOT be triggered") + Consistently(memberLeaveReplicas).Should(BeEmpty()) + }) + + It("scale-in w/ member leave", func() { + var ( + initReplicas = int32(1) + targetReplicas = int32(2) + ) + + setup(initReplicas, true) + + By("scale-out first") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas created") + scaledReplicas := make([]string, 0) + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: podName(i), + } + scaledReplicas = append(scaledReplicas, podKey.Name) + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + mockPodsReady(itsObj.Namespace, scaledReplicas...) 
+ + By("check ITS as ready") + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.IsInstanceSetReady()).Should(BeTrue()) + })).Should(Succeed()) + + By("check member join action be triggered") + for i := initReplicas; i < targetReplicas; i++ { + Eventually(memberJoinReplicas.Has(podName(i))).Should(BeTrue()) + } + + By("scale-in") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(initReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas deleted") + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: podName(i), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, false)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := int32(0); i < initReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: podName(i), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member leave action be triggered") + for i := initReplicas; i < targetReplicas; i++ { + Eventually(memberLeaveReplicas.Has(podName(i))).Should(BeTrue()) + } + }) + }) }) diff --git a/controllers/workloads/instanceset_controller_test.go b/controllers/workloads/instanceset_controller_test.go index 68fbcb24d6e..47066778a10 100644 --- a/controllers/workloads/instanceset_controller_test.go +++ b/controllers/workloads/instanceset_controller_test.go @@ -35,6 +35,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -106,35 +107,6 @@ var _ = Describe("InstanceSet Controller", func() { ).Should(Succeed()) } - mockPodReady := func(podNames ...string) { - By("mock pods ready") - for _, podName := range 
podNames { - podKey := types.NamespacedName{ - Namespace: itsObj.Namespace, - Name: podName, - } - Eventually(testapps.GetAndChangeObjStatus(&testCtx, podKey, func(pod *corev1.Pod) { - pod.Status.Phase = corev1.PodRunning - pod.Status.Conditions = []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Now(), - }, - } - pod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Name: pod.Spec.Containers[0].Name, - State: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - Image: pod.Spec.Containers[0].Image, - }, - } - })()).Should(Succeed()) - } - } - Context("reconciliation", func() { It("should reconcile well", func() { name := "test-instance-set" @@ -209,7 +181,7 @@ var _ = Describe("InstanceSet Controller", func() { Name: fmt.Sprintf("%s-2", itsObj.Name), }, } - mockPodReady(podsKey[0].Name, podsKey[1].Name, podsKey[2].Name) + mockPodsReady(itsObj.Namespace, podsKey[0].Name, podsKey[1].Name, podsKey[2].Name) By("check its ready") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -231,7 +203,7 @@ var _ = Describe("InstanceSet Controller", func() { })).Should(Succeed()) // mock new pod ready - mockPodReady(podKey.Name) + mockPodsReady(itsObj.Namespace, podKey.Name) By(fmt.Sprintf("check its status updated: %s", podKey.Name)) Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -412,12 +384,39 @@ var _ = Describe("InstanceSet Controller", func() { Context("reconfigure", func() { It("instance status", func() { createITSObj(itsName, func(f *testapps.MockInstanceSetFactory) { - f.AddConfigs(workloads.ConfigTemplate{ + f.SetInstanceUpdateStrategy(&workloads.InstanceUpdateStrategy{ + Type: kbappsv1.RollingUpdateStrategyType, + }).AddConfigs(workloads.ConfigTemplate{ Name: "server", Generation: int64(1), }) }) + By("mock pods running and available") + podKey := types.NamespacedName{ + Namespace: 
itsObj.Namespace, + Name: fmt.Sprintf("%s-0", itsObj.Name), + } + Expect(testapps.GetAndChangeObjStatus(&testCtx, podKey, func(pod *corev1.Pod) { + pod.Status.Phase = corev1.PodRunning + pod.Status.Conditions = []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + }, + } + pod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Name: pod.Spec.Containers[0].Name, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + Image: pod.Spec.Containers[0].Image, + }, + } + })()).ShouldNot(HaveOccurred()) + By("check instance status") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { g.Expect(its.Status.InstanceStatus).Should(HaveLen(1)) @@ -429,6 +428,7 @@ var _ = Describe("InstanceSet Controller", func() { Generation: int64(1), }, }, + Provisioned: true, })) })).Should(Succeed()) }) @@ -504,6 +504,7 @@ var _ = Describe("InstanceSet Controller", func() { Generation: int64(2), }, }, + Provisioned: true, })) })).Should(Succeed()) @@ -536,6 +537,7 @@ var _ = Describe("InstanceSet Controller", func() { Generation: int64(128), }, }, + Provisioned: true, })) })).Should(Succeed()) @@ -578,7 +580,7 @@ var _ = Describe("InstanceSet Controller", func() { }...) 
}) - mockPodReady(fmt.Sprintf("%s-0", itsObj.Name)) + mockPodsReady(itsObj.Namespace, fmt.Sprintf("%s-0", itsObj.Name)) By("check the init instance status") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { @@ -595,6 +597,7 @@ var _ = Describe("InstanceSet Controller", func() { Generation: int64(2), }, }, + Provisioned: true, })) })).Should(Succeed()) @@ -627,6 +630,7 @@ var _ = Describe("InstanceSet Controller", func() { Generation: int64(128), }, }, + Provisioned: true, })) })).Should(Succeed()) @@ -674,7 +678,7 @@ var _ = Describe("InstanceSet Controller", func() { }) checkPodOrdinal([]int{0, 1, 2}, eventuallyExist) - mockPodReady(itsObj.Name+"-0", itsObj.Name+"-1", itsObj.Name+"-2") + mockPodsReady(itsObj.Namespace, itsObj.Name+"-0", itsObj.Name+"-1", itsObj.Name+"-2") By("check its status") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { g.Expect(its.Status.Ordinals).Should(HaveExactElements(int32(0), int32(1), int32(2))) @@ -696,7 +700,7 @@ var _ = Describe("InstanceSet Controller", func() { its.Spec.Replicas = ptr.To[int32](4) })()).Should(Succeed()) checkPodOrdinal([]int{0, 2, 3, 4}, eventuallyExist) - mockPodReady(itsObj.Name+"-3", itsObj.Name+"-4") + mockPodsReady(itsObj.Namespace, itsObj.Name+"-3", itsObj.Name+"-4") By("check its status") Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { g.Expect(its.Status.Ordinals).Should(HaveExactElements(int32(0), int32(2), int32(3), int32(4))) @@ -714,4 +718,357 @@ var _ = Describe("InstanceSet Controller", func() { })).Should(Succeed()) }) }) + + Context("h-scaling", func() { + var ( + memberJoinReplicas = sets.New[string]() + memberLeaveReplicas = sets.New[string]() + memberJoinError, memberLeaveError error + ) + + BeforeEach(func() { + testapps.MockKBAgentClient(func(recorder *kbacli.MockClientMockRecorder) { + recorder.Action(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx 
context.Context, req kbaproto.ActionRequest) (kbaproto.ActionResponse, error) { + rsp := kbaproto.ActionResponse{} + switch req.Action { + case "memberJoin": + if memberJoinError != nil { + return rsp, memberJoinError + } + memberJoinReplicas.Insert(req.Parameters["KB_JOIN_MEMBER_POD_NAME"]) + case "memberLeave": + if memberLeaveError != nil { + return rsp, memberLeaveError + } + memberLeaveReplicas.Insert(req.Parameters["KB_LEAVE_MEMBER_POD_NAME"]) + } + return rsp, nil + }).AnyTimes() + }) + }) + + AfterEach(func() { + kbacli.UnsetMockClient() + memberJoinReplicas.Clear() + memberLeaveReplicas.Clear() + memberJoinError = nil + memberLeaveError = nil + }) + + setup := func(initReplicas int32, withMemberAction bool) { + createITSObj(itsName, func(f *testapps.MockInstanceSetFactory) { + f.SetReplicas(initReplicas). + SetPodManagementPolicy(appsv1.ParallelPodManagement). + SetInstanceUpdateStrategy(&workloads.InstanceUpdateStrategy{ + Type: kbappsv1.RollingUpdateStrategyType, + }) + if withMemberAction { + f.SetLifecycleActions(&kbappsv1.ComponentLifecycleActions{ + MemberJoin: testapps.NewLifecycleAction("member-join"), + MemberLeave: testapps.NewLifecycleAction("member-leave"), + }, nil) + } + }) + + replicas := make([]string, 0) + for i := int32(0); i < initReplicas; i++ { + replicas = append(replicas, fmt.Sprintf("%s-%d", itsObj.Name, i)) + } + mockPodsReady(itsObj.Namespace, replicas...) 
+ + By("check ITS as ready") + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.IsInstanceSetReady()).Should(BeTrue()) + })).Should(Succeed()) + } + + It("provision w/ member join", func() { + var ( + initReplicas = int32(1) + ) + + setup(initReplicas, true) + + By("check member join action NOT be triggered") + Consistently(memberJoinReplicas).Should(BeEmpty()) + }) + + It("scale-out w/o member join", func() { + var ( + initReplicas = int32(1) + targetReplicas = int32(2) + ) + + setup(initReplicas, false) + + By("scale-out") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas created") + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := int32(0); i < initReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member join action NOT be triggered") + Consistently(memberJoinReplicas).Should(BeEmpty()) + }) + + It("scale-out w/ member join", func() { + var ( + initReplicas = int32(1) + targetReplicas = int32(2) + ) + + setup(initReplicas, true) + + By("scale-out") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas created") + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + 
Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := int32(0); i < initReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member join action be triggered") + for i := initReplicas; i < targetReplicas; i++ { + Eventually(memberJoinReplicas.Has(fmt.Sprintf("%s-%d", itsObj.Name, i))).Should(BeTrue()) + } + }) + + It("scale-in w/o member leave", func() { + var ( + initReplicas = int32(2) + targetReplicas = int32(1) + ) + + setup(initReplicas, false) + + By("scale-in") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas deleted") + for i := initReplicas; i > targetReplicas; i-- { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i-1), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, false)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := targetReplicas; i > 0; i-- { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i-1), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member leave action NOT be triggered") + Consistently(memberLeaveReplicas).Should(BeEmpty()) + }) + + It("scale-in w/ member leave - init replicas", func() { + var ( + initReplicas = int32(2) + targetReplicas = int32(1) + ) + + setup(initReplicas, true) + + By("scale-in") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check 
replicas deleted") + for i := initReplicas; i > targetReplicas; i-- { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i-1), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, false)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := targetReplicas; i > 0; i-- { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i-1), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member leave action be triggered") + for i := initReplicas; i > targetReplicas; i-- { + Eventually(memberLeaveReplicas.Has(fmt.Sprintf("%s-%d", itsObj.Name, i-1))).Should(BeTrue()) + } + }) + + It("scale-in w/ member leave - joined replicas", func() { + var ( + initReplicas = int32(1) + targetReplicas = int32(2) + ) + + setup(initReplicas, true) + + By("scale-out first") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas created") + scaledReplicas := make([]string, 0) + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + scaledReplicas = append(scaledReplicas, podKey.Name) + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + mockPodsReady(itsObj.Namespace, scaledReplicas...) 
+ + By("check ITS as ready") + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.IsInstanceSetReady()).Should(BeTrue()) + })).Should(Succeed()) + + By("check member join action be triggered") + for i := initReplicas; i < targetReplicas; i++ { + Eventually(memberJoinReplicas.Has(fmt.Sprintf("%s-%d", itsObj.Name, i))).Should(BeTrue()) + } + + By("scale-in") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(initReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas deleted") + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, false)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := int32(0); i < initReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check member leave action be triggered") + for i := initReplicas; i < targetReplicas; i++ { + Eventually(memberLeaveReplicas.Has(fmt.Sprintf("%s-%d", itsObj.Name, i))).Should(BeTrue()) + } + }) + + It("scale-in w/ member leave - unjoined replicas", func() { + var ( + initReplicas = int32(1) + targetReplicas = int32(2) + ) + + setup(initReplicas, true) + + By("mock member-join action error") + memberJoinError = fmt.Errorf("mock member-join action error") + + By("scale-out first") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(targetReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas created") + scaledReplicas := make([]string, 0) + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + 
Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + scaledReplicas = append(scaledReplicas, podKey.Name) + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + mockPodsReady(itsObj.Namespace, scaledReplicas...) + + By("check ITS as NOT ready") + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.IsInstanceSetReady()).Should(BeFalse()) + })).Should(Succeed()) + + By("check member join action NOT be triggered") + Consistently(memberJoinReplicas).Should(BeEmpty()) + + By("scale-in") + Expect(testapps.GetAndChangeObj(&testCtx, itsKey, func(its *workloads.InstanceSet) { + its.Spec.Replicas = ptr.To(initReplicas) + })()).ShouldNot(HaveOccurred()) + + By("check replicas deleted") + for i := initReplicas; i < targetReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Eventually(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, false)).Should(Succeed()) + } + + By("check init replicas keep running") + for i := int32(0); i < initReplicas; i++ { + podKey := types.NamespacedName{ + Namespace: itsObj.Namespace, + Name: fmt.Sprintf("%s-%d", itsObj.Name, i), + } + Consistently(testapps.CheckObjExists(&testCtx, podKey, &corev1.Pod{}, true)).Should(Succeed()) + } + + By("check ITS as ready") + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.IsInstanceSetReady()).Should(BeTrue()) + })).Should(Succeed()) + + By("check member leave action NOT be triggered") + Consistently(memberLeaveReplicas).Should(BeEmpty()) + }) + }) }) diff --git a/deploy/helm/crds/workloads.kubeblocks.io_instances.yaml b/deploy/helm/crds/workloads.kubeblocks.io_instances.yaml index 2256ea93804..cd77bd89100 100644 --- a/deploy/helm/crds/workloads.kubeblocks.io_instances.yaml +++ b/deploy/helm/crds/workloads.kubeblocks.io_instances.yaml @@ 
-1095,6 +1095,1293 @@ spec: description: Defines a set of hooks that customize the behavior of an Instance throughout its lifecycle. properties: + dataLoad: + description: Defines the procedure for importing data into a replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. + properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. + This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. 
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. 
+ type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. 
+ type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. + type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". + enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. 
+ Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. + + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. 
+ This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. + format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object + memberJoin: + description: Defines the procedure to add a new replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. + properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. 
+ + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. + This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. 
+ properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. + type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. 
+ type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". + enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. 
+ - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. + + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. + format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. 
+ + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object + memberLeave: + description: Defines the procedure to remove a replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. + properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. + This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. 
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. 
+ type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. 
+ type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. + type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". + enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. 
+ Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. + + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. 
+ This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. + format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object reconfigure: description: Defines the procedure that update a replica with new configuration. @@ -9956,12 +11243,24 @@ spec: description: currentRevision, if not empty, indicates the version of the Instance used to generate pod. type: string + dataLoaded: + description: Represents whether the instance data is loaded. + type: boolean + inVolumeExpansion: + description: Represents whether the instance is in volume expansion. + type: boolean + memberJoined: + description: Represents whether the instance is joined the cluster. + type: boolean observedGeneration: description: |- observedGeneration is the most recent generation observed for this InstanceSet. 
It corresponds to the InstanceSet's generation, which is updated on mutation by the API Server. format: int64 type: integer + provisioned: + description: Represents whether the instance is provisioned. + type: boolean ready: description: Represents whether the instance is in ready condition. type: boolean @@ -9975,9 +11274,6 @@ spec: description: updateRevision, if not empty, indicates the version of the Instance used to generate pod. type: string - volumeExpansion: - description: Represents whether the instance is in volume expansion. - type: boolean type: object type: object served: true diff --git a/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml b/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml index 3d52c5542a7..3b8e23aca2d 100644 --- a/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml +++ b/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml @@ -2564,6 +2564,1293 @@ spec: description: Defines a set of hooks that customize the behavior of an Instance throughout its lifecycle. properties: + dataLoad: + description: Defines the procedure for importing data into a replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. + properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. 
+ items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. + This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. 
+ type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. + type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". 
+ enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. 
+ + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. + format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object + memberJoin: + description: Defines the procedure to add a new replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. 
+ properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. + This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. 
+ + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. 
+ type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". + enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. 
+ - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. + + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. 
+ format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object + memberLeave: + description: Defines the procedure to remove a replica. + properties: + exec: + description: |- + Defines the command to run. + + + This field cannot be updated. + properties: + args: + description: Args represents the arguments that are passed + to the `command` for execution. + items: + type: string + type: array + command: + description: |- + Specifies the command to be executed inside the container. + The working directory for this command is the container's root directory('/'). + Commands are executed directly without a shell environment, meaning shell-specific syntax ('|', etc.) is not supported. + If the shell is required, it must be explicitly invoked in the command. + + + A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. + items: + type: string + type: array + container: + description: |- + Specifies the name of the container within the same pod whose resources will be shared with the action. 
+ This allows the action to utilize the specified container's resources without executing within it. + + + The name must match one of the containers defined in `componentDefinition.spec.runtime`. + + + The resources that can be shared are included: + + + - volume mounts + + + This field cannot be updated. + type: string + env: + description: |- + Represents a list of environment variables that will be injected into the container. + These variables enable the container to adapt its behavior based on the environment it's running in. + + + This field cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: |- + Specifies the container image to be used for running the Action. + + + When specified, a dedicated container will be created using this image to execute the Action. + All actions with same image will share the same container. + + + This field cannot be updated. + type: string + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + type: object + grpc: + description: |- + Defines the gRPC call to issue. + + + This field cannot be updated. + properties: + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + description: Name of the method to invoke on the gRPC + service. 
+ type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "50051") or a named port defined in the container spec. + type: string + request: + additionalProperties: + type: string + description: |- + Request payload for the gRPC method. + + + Keys are proto field names (lowerCamelCase); values are strings that can include Go templates. + Templates are rendered with predefined action variables before the request is sent. + type: object + response: + description: Required response schema for the gRPC method. + properties: + message: + description: |- + Name of the field in the response whose value should be output. + Printed to stdout on success, or stderr on failure. + type: string + status: + description: |- + Name of the string field in the response that carries status information. + If non-empty, the action fails. + type: string + type: object + service: + description: Fully-qualified name of the gRPC service + to call. + type: string + required: + - method + - port + - service + type: object + http: + description: |- + Defines the HTTP request to perform. + + + This field cannot be updated. + properties: + body: + description: |- + Optional HTTP request body. + + + Supports Go text/template syntax; rendered with predefined variables before sending. + type: string + headers: + description: |- + Custom headers to set in the request. + Header values may use Go text/template syntax, rendered with predefined variables. + items: + description: HTTPHeader represents a single HTTP header + key/value pair. + properties: + name: + description: Name of the header field. + type: string + value: + description: Value of the header field. + type: string + required: + - name + - value + type: object + type: array + host: + description: |- + The target host to connect to. + Defaults to "127.0.0.1" if not specified. + type: string + method: + default: GET + description: |- + The HTTP method to use. + Defaults to "GET". 
+ enum: + - GET + - POST + - PUT + - DELETE + - HEAD + - PATCH + type: string + path: + default: / + description: |- + The path to request on the HTTP server. + Defaults to "/" if not specified. + pattern: ^/.* + type: string + port: + description: |- + The port to access on the host. + It may be a numeric string (e.g., "8080") or a named port defined in the container spec. + type: string + scheme: + default: HTTP + description: |- + The scheme to use for connecting to the host. + Defaults to "HTTP". + enum: + - HTTP + - HTTPS + type: string + required: + - port + type: object + matchingKey: + description: |- + Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. + The impact of this field depends on the `targetPodSelector` value: + + + - When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. + - When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` + will be selected for the Action. + + + This field cannot be updated. + type: string + preCondition: + description: |- + Specifies the state that the cluster must reach before the Action is executed. + Currently, this is only applicable to the `postProvision` action. + + + The conditions are as follows: + + + - `Immediately`: Executed right after the Component object is created. + The readiness of the Component and its resources is not guaranteed at this stage. + - `RuntimeReady`: The Action is triggered after the Component object has been created and all associated + runtime resources (e.g. Pods) are in a ready state. + - `ComponentReady`: The Action is triggered after the Component itself is in a ready state. + This process does not affect the readiness state of the Component or the Cluster. + - `ClusterReady`: The Action is executed after the Cluster is in a ready state. + This execution does not alter the Component or the Cluster's state of readiness. 
+ + + This field cannot be updated. + type: string + retryPolicy: + description: |- + Defines the strategy to be taken when retrying the Action after a failure. + + + It specifies the conditions under which the Action should be retried and the limits to apply, + such as the maximum number of retries and backoff strategy. + + + This field cannot be updated. + properties: + maxRetries: + default: 0 + description: |- + Defines the maximum number of retry attempts that should be made for a given Action. + This value is set to 0 by default, indicating that no retries will be made. + type: integer + retryInterval: + default: 0 + description: |- + Indicates the duration of time to wait between each retry attempt. + This value is set to 0 by default, indicating that there will be no delay between retry attempts. + format: int64 + type: integer + type: object + targetPodSelector: + description: |- + Defines the criteria used to select the target Pod(s) for executing the Action. + This is useful when there is no default target replica identified. + It allows for precise control over which Pod(s) the Action should run in. + + + If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod + to be removed or added; or a random pod if the Action is triggered at the component level, such as + post-provision or pre-terminate of the component. + + + This field cannot be updated. + enum: + - Any + - All + - Role + - Ordinal + type: string + timeoutSeconds: + default: 0 + description: |- + Specifies the maximum duration in seconds that the Action is allowed to run. + + + If the Action does not complete within this time frame, it will be terminated. + + + This field cannot be updated. + format: int32 + type: integer + type: object reconfigure: description: Defines the procedure that update a replica with new configuration. 
@@ -11721,16 +13008,25 @@ spec: - name type: object type: array + dataLoaded: + description: Represents whether the instance data is loaded. + type: boolean + inVolumeExpansion: + description: Represents whether the instance is in volume expansion. + type: boolean + memberJoined: + description: Represents whether the instance is joined the cluster. + type: boolean podName: default: Unknown description: Represents the name of the pod. type: string + provisioned: + description: Represents whether the instance is provisioned. + type: boolean role: description: Represents the role of the instance observed. type: string - volumeExpansion: - description: Represents whether the instance is in volume expansion. - type: boolean required: - podName type: object diff --git a/docs/developer_docs/api-reference/cluster.md b/docs/developer_docs/api-reference/cluster.md index 009bac26830..0cb64503207 100644 --- a/docs/developer_docs/api-reference/cluster.md +++ b/docs/developer_docs/api-reference/cluster.md @@ -33308,7 +33308,43 @@ string -volumeExpansion
+provisioned
+ +bool + + + +(Optional) +

Represents whether the instance is provisioned.

+ + + + +dataLoaded
+ +bool + + + +(Optional) +

Represents whether the instance data is loaded.

+ + + + +memberJoined
+ +bool + + + +(Optional) +

Represents whether the instance has joined the cluster.

+ + + + +inVolumeExpansion
bool @@ -33438,7 +33474,43 @@ string -volumeExpansion
+provisioned
+ +bool + + + +(Optional) +

Represents whether the instance is provisioned.

+ + + + +dataLoaded
+ +bool + + + +(Optional) +

Represents whether the instance data is loaded.

+ + + + +memberJoined
+ +bool + + + +(Optional) +

Represents whether the instance has joined the cluster.

+ + + + +inVolumeExpansion
bool @@ -33756,6 +33828,48 @@ Action +memberJoin
+ + +Action + + + + +(Optional) +

Defines the procedure to add a new replica.

+ + + + +memberLeave
+ + +Action + + + + +(Optional) +

Defines the procedure to remove a replica.

+ + + + +dataLoad
+ + +Action + + + + +(Optional) +

Defines the procedure for importing data into a replica.

+ + + + reconfigure
diff --git a/pkg/constant/annotations.go b/pkg/constant/annotations.go index 4788d9c9538..f061516885e 100644 --- a/pkg/constant/annotations.go +++ b/pkg/constant/annotations.go @@ -50,6 +50,8 @@ const ( NodeSelectorOnceAnnotationKey = "workloads.kubeblocks.io/node-selector-once" PVCNamePrefixAnnotationKey = "apps.kubeblocks.io/pvc-name-prefix" + + LifeCycleDataLoadedAnnotationKey = "apps.kubeblocks.io/lifecycle-data-loaded" ) const ( diff --git a/pkg/controller/builder/builder_instance.go b/pkg/controller/builder/builder_instance.go index 2716fe460dc..1e2902cb3ba 100644 --- a/pkg/controller/builder/builder_instance.go +++ b/pkg/controller/builder/builder_instance.go @@ -206,8 +206,10 @@ func (builder *InstanceBuilder) SetRoles(roles []workloads.ReplicaRole) *Instanc return builder } -func (builder *InstanceBuilder) SetLifecycleActions(actions *workloads.LifecycleActions) *InstanceBuilder { - builder.get().Spec.LifecycleActions = actions +func (builder *InstanceBuilder) SetLifecycleActions(actions *workloads.LifecycleActions, initializing bool) *InstanceBuilder { + if !initializing { + builder.get().Spec.LifecycleActions = actions + } return builder } diff --git a/pkg/controller/builder/builder_instance_set.go b/pkg/controller/builder/builder_instance_set.go index 903e092aafa..b2d07eadf64 100644 --- a/pkg/controller/builder/builder_instance_set.go +++ b/pkg/controller/builder/builder_instance_set.go @@ -133,6 +133,9 @@ func (builder *InstanceSetBuilder) SetLifecycleActions(lifecycleActions *kbappsv } if lifecycleActions != nil { builder.get().Spec.LifecycleActions.Switchover = lifecycleActions.Switchover + builder.get().Spec.LifecycleActions.MemberJoin = lifecycleActions.MemberJoin + builder.get().Spec.LifecycleActions.MemberLeave = lifecycleActions.MemberLeave + builder.get().Spec.LifecycleActions.DataLoad = lifecycleActions.DataLoad builder.get().Spec.LifecycleActions.Reconfigure = lifecycleActions.Reconfigure } if templateVars != nil { diff --git 
a/pkg/controller/component/kbagent.go b/pkg/controller/component/kbagent.go index ad8ba87cf27..a5bac91e475 100644 --- a/pkg/controller/component/kbagent.go +++ b/pkg/controller/component/kbagent.go @@ -106,18 +106,18 @@ func buildKBAgentTaskEnv(task proto.Task) (map[string]string, error) { }, nil } -func updateKBAgentTaskEnv(envVars map[string]string, f func(proto.Task) *proto.Task) (map[string]string, error) { - envVar, err := kbagent.UpdateEnv4Worker(envVars, f) - if err != nil { - return nil, err - } - if envVar == nil { - return nil, nil - } - return map[string]string{ - envVar.Name: envVar.Value, - }, nil -} +// func updateKBAgentTaskEnv(envVars map[string]string, f func(proto.Task) *proto.Task) (map[string]string, error) { +// envVar, err := kbagent.UpdateEnv4Worker(envVars, f) +// if err != nil { +// return nil, err +// } +// if envVar == nil { +// return nil, nil +// } +// return map[string]string{ +// envVar.Name: envVar.Value, +// }, nil +// } func buildKBAgentContainer(synthesizedComp *SynthesizedComponent) error { if !hasActionDefined(synthesizedComp) { diff --git a/pkg/controller/component/kbagent_task_event.go b/pkg/controller/component/kbagent_task_event.go index d48e576c3d0..f9b89cfba96 100644 --- a/pkg/controller/component/kbagent_task_event.go +++ b/pkg/controller/component/kbagent_task_event.go @@ -48,7 +48,7 @@ func (h *KBAgentTaskEventHandler) Handle(cli client.Client, reqCtx intctrlutil.R func (h *KBAgentTaskEventHandler) isTaskEvent(event *corev1.Event) bool { return event.ReportingController == proto.ProbeEventReportingController && - event.Reason == "task" && event.InvolvedObject.FieldPath == proto.ProbeEventFieldPath + (event.Reason == "task" || event.Reason == newReplicaTask) && event.InvolvedObject.FieldPath == proto.ProbeEventFieldPath } func (h *KBAgentTaskEventHandler) handleEvent(reqCtx intctrlutil.RequestCtx, cli client.Client, namespace string, event proto.TaskEvent) error { diff --git a/pkg/controller/component/new_replicas.go 
b/pkg/controller/component/new_replicas.go new file mode 100644 index 00000000000..b79bcf0ba98 --- /dev/null +++ b/pkg/controller/component/new_replicas.go @@ -0,0 +1,141 @@ +/* +Copyright (C) 2022-2025 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package component + +import ( + "context" + "strings" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" + "github.com/apecloud/kubeblocks/pkg/kbagent/proto" +) + +const ( + // new replicas task & event + newReplicaTask = "newReplica" + defaultNewReplicaTaskReportPeriodSeconds = 60 +) + +func NewReplicaTask(compName, uid string, source lifecycle.Replica, replicas []string) (map[string]string, error) { + host, port, err := source.StreamingEndpoint() + if err != nil { + return nil, err + } + task := proto.Task{ + Instance: compName, + Task: newReplicaTask, + UID: uid, + Replicas: strings.Join(replicas, ","), + NotifyAtFinish: true, + ReportPeriodSeconds: defaultNewReplicaTaskReportPeriodSeconds, + NewReplica: &proto.NewReplicaTask{ + Remote: host, + Port: port, + Replicas: strings.Join(replicas, ","), + }, + } + return buildKBAgentTaskEnv(task) +} + +func handleNewReplicaTaskEvent(logger logr.Logger, ctx 
context.Context, cli client.Client, namespace string, event proto.TaskEvent) error { + key := types.NamespacedName{ + Namespace: namespace, + Name: event.Replica, + } + pod := &corev1.Pod{} + if err := cli.Get(ctx, key, pod); err != nil { + logger.Error(err, "get pod failed when handle new replica event", + "code", event.Code, "finished", !event.EndTime.IsZero(), "message", event.Message) + return err + } + + var err error + finished := !event.EndTime.IsZero() + if finished && event.Code == 0 { + err = handleNewReplicaTaskEvent4Finished(ctx, cli, pod, event) + } + if err != nil { + logger.Error(err, "handle new replica task event failed", + "code", event.Code, "finished", finished, "message", event.Message) + } else { + logger.Info("handle new replica task event success", + "code", event.Code, "finished", finished, "message", event.Message) + } + return err +} + +func handleNewReplicaTaskEvent4Finished(ctx context.Context, cli client.Client, pod *corev1.Pod, event proto.TaskEvent) error { + // if err := func() error { + // envKey := types.NamespacedName{ + // Namespace: its.Namespace, + // Name: constant.GetCompEnvCMName(its.Name), + // } + // obj := &corev1.ConfigMap{} + // err := cli.Get(ctx, envKey, obj) + // if err != nil { + // return err + // } + // + // parameters, err := updateKBAgentTaskEnv(obj.Data, func(task proto.Task) *proto.Task { + // if task.Task == newReplicaTask { + // replicas := strings.Split(task.Replicas, ",") + // replicas = slices.DeleteFunc(replicas, func(r string) bool { + // return r == event.Replica + // }) + // if len(replicas) == 0 { + // return nil + // } + // task.Replicas = strings.Join(replicas, ",") + // if task.NewReplica != nil { + // task.NewReplica.Replicas = task.Replicas + // } + // } + // return &task + // }) + // if err != nil { + // return err + // } + // if parameters == nil { + // return nil // do nothing + // } + // + // if obj.Data == nil { + // obj.Data = make(map[string]string) + // } + // for k, v := range 
parameters { + // obj.Data[k] = v + // } + // return cli.Update(ctx, obj) + // }(); err != nil { + // return err + // } + + if pod.Annotations == nil { + pod.Annotations = map[string]string{} + } + pod.Annotations[constant.LifeCycleDataLoadedAnnotationKey] = "true" + return cli.Update(ctx, pod) +} diff --git a/pkg/controller/component/replicas.go b/pkg/controller/component/replicas.go deleted file mode 100644 index 0e713fb05a7..00000000000 --- a/pkg/controller/component/replicas.go +++ /dev/null @@ -1,399 +0,0 @@ -/* -Copyright (C) 2022-2025 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package component - -import ( - "context" - "encoding/json" - "fmt" - "slices" - "strings" - "time" - - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" - "github.com/apecloud/kubeblocks/pkg/constant" - intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" - "github.com/apecloud/kubeblocks/pkg/kbagent" - "github.com/apecloud/kubeblocks/pkg/kbagent/proto" -) - -const ( - replicaStatusAnnotationKey = "apps.kubeblocks.io/replicas-status" - - // new replicas task & event - newReplicaTask = "newReplica" - defaultNewReplicaTaskReportPeriodSeconds = 60 -) - -type ReplicasStatus struct { - Replicas int32 `json:"replicas"` - Status []ReplicaStatus `json:"status"` -} - -type ReplicaStatus struct { - Name string `json:"name"` - Generation string `json:"generation"` - CreationTimestamp time.Time `json:"creationTimestamp"` - DeletionTimestamp *time.Time `json:"deletionTimestamp,omitempty"` - Message string `json:"message,omitempty"` - Provisioned bool `json:"provisioned,omitempty"` - DataLoaded *bool `json:"dataLoaded,omitempty"` - MemberJoined *bool `json:"memberJoined,omitempty"` - Reconfigured *string `json:"reconfigured,omitempty"` // TODO: component status -} - -func BuildReplicasStatus(running, proto *workloads.InstanceSet) { - if running == nil || proto == nil { - return - } - annotations := running.Annotations - if annotations == nil { - return - } - message, ok := annotations[replicaStatusAnnotationKey] - if !ok { - return - } - if proto.Annotations == nil { - proto.Annotations = make(map[string]string) - } - proto.Annotations[replicaStatusAnnotationKey] = message -} - -func NewReplicasStatus(its *workloads.InstanceSet, replicas []string, hasMemberJoin, hasDataAction bool) error { - loaded := func() *bool { - if hasDataAction { - return ptr.To(false) - } - return nil - }() - joined 
:= func() *bool { - if hasMemberJoin { - return ptr.To(false) - } - return nil - }() - return UpdateReplicasStatusFunc(its, func(status *ReplicasStatus) error { - status.Replicas = *its.Spec.Replicas - if status.Status == nil { - status.Status = make([]ReplicaStatus, 0) - } - for _, name := range replicas { - if slices.ContainsFunc(status.Status, func(s ReplicaStatus) bool { - return s.Name == name - }) { - continue - } - status.Status = append(status.Status, ReplicaStatus{ - Name: name, - Generation: compGenerationFromITS(its), - CreationTimestamp: time.Now(), - Provisioned: false, - DataLoaded: loaded, - MemberJoined: joined, - }) - } - return nil - }) -} - -func DeleteReplicasStatus(its *workloads.InstanceSet, replicas []string, f func(status ReplicaStatus)) error { - return UpdateReplicasStatusFunc(its, func(status *ReplicasStatus) error { - status.Replicas = *its.Spec.Replicas - status.Status = slices.DeleteFunc(status.Status, func(s ReplicaStatus) bool { - if slices.Contains(replicas, s.Name) { - if f != nil { - f(s) - } - return true - } - return false - }) - return nil - }) -} - -func StatusReplicasStatus(its *workloads.InstanceSet, replicas []string, hasMemberJoin, hasDataAction bool) error { - loaded := func() *bool { - if hasDataAction { - return ptr.To(true) - } - return nil - }() - joined := func() *bool { - if hasMemberJoin { - return ptr.To(true) - } - return nil - }() - return UpdateReplicasStatusFunc(its, func(status *ReplicasStatus) error { - status.Replicas = *its.Spec.Replicas - if status.Status == nil { - status.Status = make([]ReplicaStatus, 0) - } - for _, replica := range replicas { - i := slices.IndexFunc(status.Status, func(s ReplicaStatus) bool { - return s.Name == replica - }) - if i >= 0 { - status.Status[i].Provisioned = true - } else { - status.Status = append(status.Status, ReplicaStatus{ - Name: replica, - Generation: compGenerationFromITS(its), - CreationTimestamp: its.CreationTimestamp.Time, - Provisioned: true, - DataLoaded: 
loaded, - MemberJoined: joined, - }) - } - } - return nil - }) -} - -func UpdateReplicasStatusFunc(its *workloads.InstanceSet, f func(status *ReplicasStatus) error) error { - if f == nil { - return nil - } - - status, err := getReplicasStatus(its) - if err != nil { - return err - } - - if err = f(&status); err != nil { - return err - } - - return setReplicasStatus(its, status) -} - -func GetReplicasStatusFunc(its *workloads.InstanceSet, f func(ReplicaStatus) bool) ([]string, error) { - if f == nil { - return nil, nil - } - status, err := getReplicasStatus(its) - if err != nil { - return nil, err - } - replicas := make([]string, 0) - for _, s := range status.Status { - if f(s) { - replicas = append(replicas, s.Name) - } - } - return replicas, nil -} - -func NewReplicaTask(compName, uid string, source *corev1.Pod, replicas []string) (map[string]string, error) { - port, err := intctrlutil.GetPortByName(*source, kbagent.ContainerName, kbagent.DefaultStreamingPortName) - if err != nil { - return nil, err - } - task := proto.Task{ - Instance: compName, - Task: newReplicaTask, - UID: uid, - Replicas: strings.Join(replicas, ","), - NotifyAtFinish: true, - ReportPeriodSeconds: defaultNewReplicaTaskReportPeriodSeconds, - NewReplica: &proto.NewReplicaTask{ - Remote: intctrlutil.PodFQDN(source.Namespace, compName, source.Name), - Port: port, - Replicas: strings.Join(replicas, ","), - }, - } - return buildKBAgentTaskEnv(task) -} - -func compGenerationFromITS(its *workloads.InstanceSet) string { - if its == nil { - return "" - } - annotations := its.Annotations - if annotations == nil { - return "" - } - return annotations[constant.KubeBlocksGenerationKey] -} - -func getReplicasStatus(its *workloads.InstanceSet) (ReplicasStatus, error) { - if its == nil { - return ReplicasStatus{}, nil - } - annotations := its.GetAnnotations() - if annotations == nil { - return ReplicasStatus{}, nil - } - message, ok := annotations[replicaStatusAnnotationKey] - if !ok { - return 
ReplicasStatus{}, nil - } - status := &ReplicasStatus{} - err := json.Unmarshal([]byte(message), &status) - if err != nil { - return ReplicasStatus{}, err - } - return *status, nil -} - -func setReplicasStatus(its *workloads.InstanceSet, status ReplicasStatus) error { - if its == nil { - return nil - } - out, err := json.Marshal(&status) - if err != nil { - return err - } - annotations := its.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - annotations[replicaStatusAnnotationKey] = string(out) - its.SetAnnotations(annotations) - return nil -} - -func handleNewReplicaTaskEvent(logger logr.Logger, ctx context.Context, cli client.Client, namespace string, event proto.TaskEvent) error { - key := types.NamespacedName{ - Namespace: namespace, - Name: event.Instance, - } - its := &workloads.InstanceSet{} - if err := cli.Get(ctx, key, its); err != nil { - logger.Error(err, "get ITS failed when handle new replica task event", - "code", event.Code, "finished", !event.EndTime.IsZero(), "message", event.Message) - return err - } - - var err error - finished := !event.EndTime.IsZero() - switch { - case finished && event.Code == 0: - err = handleNewReplicaTaskEvent4Finished(ctx, cli, its, event) - case finished: - err = handleNewReplicaTaskEvent4Failed(ctx, cli, its, event) - default: - err = handleNewReplicaTaskEvent4Unfinished(ctx, cli, its, event) - } - if err != nil { - logger.Error(err, "handle new replica task event failed", - "code", event.Code, "finished", finished, "message", event.Message) - } else { - logger.Info("handle new replica task event success", - "code", event.Code, "finished", finished, "message", event.Message) - } - return err -} - -func handleNewReplicaTaskEvent4Finished(ctx context.Context, cli client.Client, its *workloads.InstanceSet, event proto.TaskEvent) error { - if err := func() error { - envKey := types.NamespacedName{ - Namespace: its.Namespace, - Name: constant.GetCompEnvCMName(its.Name), - } - obj := 
&corev1.ConfigMap{} - err := cli.Get(ctx, envKey, obj) - if err != nil { - return err - } - - parameters, err := updateKBAgentTaskEnv(obj.Data, func(task proto.Task) *proto.Task { - if task.Task == newReplicaTask { - replicas := strings.Split(task.Replicas, ",") - replicas = slices.DeleteFunc(replicas, func(r string) bool { - return r == event.Replica - }) - if len(replicas) == 0 { - return nil - } - task.Replicas = strings.Join(replicas, ",") - if task.NewReplica != nil { - task.NewReplica.Replicas = task.Replicas - } - } - return &task - }) - if err != nil { - return err - } - if parameters == nil { - return nil // do nothing - } - - if obj.Data == nil { - obj.Data = make(map[string]string) - } - for k, v := range parameters { - obj.Data[k] = v - } - return cli.Update(ctx, obj) - }(); err != nil { - return err - } - return updateReplicaStatusFunc(ctx, cli, its, event.Replica, func(status *ReplicaStatus) error { - status.Message = "" - status.Provisioned = true - status.DataLoaded = ptr.To(true) - return nil - }) -} - -func handleNewReplicaTaskEvent4Unfinished(ctx context.Context, cli client.Client, its *workloads.InstanceSet, event proto.TaskEvent) error { - return updateReplicaStatusFunc(ctx, cli, its, event.Replica, func(status *ReplicaStatus) error { - status.Message = event.Message - status.Provisioned = true - status.DataLoaded = ptr.To(false) - return nil - }) -} - -func handleNewReplicaTaskEvent4Failed(ctx context.Context, cli client.Client, its *workloads.InstanceSet, event proto.TaskEvent) error { - return updateReplicaStatusFunc(ctx, cli, its, event.Replica, func(status *ReplicaStatus) error { - status.Message = event.Message - status.Provisioned = true - return nil - }) -} - -func updateReplicaStatusFunc(ctx context.Context, cli client.Client, - its *workloads.InstanceSet, replicaName string, f func(*ReplicaStatus) error) error { - if err := UpdateReplicasStatusFunc(its, func(status *ReplicasStatus) error { - for i := range status.Status { - if 
status.Status[i].Name == replicaName { - if f != nil { - return f(&status.Status[i]) - } - return nil - } - } - return fmt.Errorf("replica %s not found", replicaName) - }); err != nil { - return err - } - return cli.Update(ctx, its) -} diff --git a/pkg/controller/component/replicas_test.go b/pkg/controller/component/replicas_test.go deleted file mode 100644 index 8841f564f0b..00000000000 --- a/pkg/controller/component/replicas_test.go +++ /dev/null @@ -1,293 +0,0 @@ -/* -Copyright (C) 2022-2025 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package component - -import ( - "slices" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" - "github.com/apecloud/kubeblocks/pkg/constant" -) - -var _ = Describe("replicas", func() { - var ( - its *workloads.InstanceSet - replicas []string - ) - - cleanEnv := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. 
- By("clean resources") - } - - BeforeEach(func() { - cleanEnv() - }) - - AfterEach(func() { - cleanEnv() - }) - - Context("status", func() { - BeforeEach(func() { - its = &workloads.InstanceSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: testCtx.DefaultNamespace, - Name: "test-cluster-its", - CreationTimestamp: metav1.Now(), - Annotations: map[string]string{ - constant.KubeBlocksGenerationKey: "1", - }, - }, - Spec: workloads.InstanceSetSpec{ - Replicas: ptr.To[int32](3), - }, - } - replicas = []string{"test-cluster-its-0", "test-cluster-its-1", "test-cluster-its-2"} - }) - - It("status init replicas", func() { - Expect(StatusReplicasStatus(its, replicas, true, true)).Should(Succeed()) - Expect(its.Annotations).Should(HaveKey(replicaStatusAnnotationKey)) - - status, err := getReplicasStatus(its) - Expect(err).Should(BeNil()) - Expect(status.Replicas).Should(Equal(int32(3))) - Expect(status.Status).Should(HaveLen(int(status.Replicas))) - for _, s := range status.Status { - Expect(replicas).Should(ContainElement(s.Name)) - Expect(s.Generation).Should(Equal("1")) - Expect(s.CreationTimestamp.Equal(its.CreationTimestamp.Time)).Should(BeTrue()) - Expect(s.Provisioned).Should(BeTrue()) - Expect(s.DataLoaded).ShouldNot(BeNil()) - Expect(*s.DataLoaded).Should(BeTrue()) - Expect(s.MemberJoined).ShouldNot(BeNil()) - Expect(*s.MemberJoined).Should(BeTrue()) - } - }) - - It("new replicas", func() { - Expect(StatusReplicasStatus(its, replicas, true, true)).Should(Succeed()) - - its.Annotations[constant.KubeBlocksGenerationKey] = "2" - its.Spec.Replicas = ptr.To[int32](5) - newReplicas := []string{"test-cluster-its-3", "test-cluster-its-4"} - Expect(NewReplicasStatus(its, newReplicas, true, true)).Should(Succeed()) - - status, err := getReplicasStatus(its) - Expect(err).Should(BeNil()) - Expect(status.Replicas).Should(Equal(int32(5))) - Expect(status.Status).Should(HaveLen(int(status.Replicas))) - for _, s := range status.Status { - if slices.Contains(newReplicas, s.Name) { 
- Expect(s.Generation).Should(Equal("2")) - Expect(s.CreationTimestamp.Equal(its.CreationTimestamp.Time)).Should(BeFalse()) - Expect(s.Provisioned).Should(BeFalse()) - Expect(s.DataLoaded).ShouldNot(BeNil()) - Expect(*s.DataLoaded).Should(BeFalse()) - Expect(s.MemberJoined).ShouldNot(BeNil()) - Expect(*s.MemberJoined).Should(BeFalse()) - } - } - }) - - It("delete replicas", func() { - Expect(StatusReplicasStatus(its, replicas, true, true)).Should(Succeed()) - - its.Annotations[constant.KubeBlocksGenerationKey] = "2" - its.Spec.Replicas = ptr.To[int32](2) - deleteReplicas := []string{"test-cluster-its-2"} - Expect(DeleteReplicasStatus(its, deleteReplicas, func(s ReplicaStatus) { - Expect(s.Provisioned).Should(BeTrue()) - Expect(s.DataLoaded).ShouldNot(BeNil()) - Expect(*s.DataLoaded).Should(BeTrue()) - Expect(s.MemberJoined).ShouldNot(BeNil()) - Expect(*s.MemberJoined).Should(BeTrue()) - })).Should(Succeed()) - - status, err := getReplicasStatus(its) - Expect(err).Should(BeNil()) - Expect(status.Replicas).Should(Equal(int32(2))) - Expect(status.Status).Should(HaveLen(int(status.Replicas))) - }) - - It("status new replicas", func() { - Expect(StatusReplicasStatus(its, replicas, true, true)).Should(Succeed()) - - its.Annotations[constant.KubeBlocksGenerationKey] = "2" - its.Spec.Replicas = ptr.To[int32](5) - newReplicas := []string{"test-cluster-its-3", "test-cluster-its-4"} - Expect(NewReplicasStatus(its, newReplicas, true, true)).Should(Succeed()) - - replicas = append(replicas, "test-cluster-its-3") - Expect(StatusReplicasStatus(its, replicas, true, true)).Should(Succeed()) - - status, err := getReplicasStatus(its) - Expect(err).Should(BeNil()) - for _, s := range status.Status { - if s.Name == "test-cluster-its-3" { - Expect(s.Provisioned).Should(BeTrue()) // provisioned - Expect(s.DataLoaded).ShouldNot(BeNil()) - Expect(*s.DataLoaded).Should(BeFalse()) // not loaded - Expect(s.MemberJoined).ShouldNot(BeNil()) - Expect(*s.MemberJoined).Should(BeFalse()) // not 
joined - } - } - }) - - It("delete new replicas", func() { - Expect(StatusReplicasStatus(its, replicas, true, true)).Should(Succeed()) - - its.Annotations[constant.KubeBlocksGenerationKey] = "2" - its.Spec.Replicas = ptr.To[int32](5) - newReplicas := []string{"test-cluster-its-3", "test-cluster-its-4"} - Expect(NewReplicasStatus(its, newReplicas, true, true)).Should(Succeed()) - - its.Annotations[constant.KubeBlocksGenerationKey] = "3" - its.Spec.Replicas = ptr.To[int32](4) - deleteReplicas := []string{"test-cluster-its-4"} - Expect(DeleteReplicasStatus(its, deleteReplicas, func(s ReplicaStatus) { - Expect(s.Provisioned).Should(BeFalse()) - Expect(s.DataLoaded).ShouldNot(BeNil()) - Expect(*s.DataLoaded).Should(BeFalse()) - Expect(s.MemberJoined).ShouldNot(BeNil()) - Expect(*s.MemberJoined).Should(BeFalse()) - })).Should(Succeed()) - }) - - // It("task event for new replicas - succeed", func() { - // Expect(StatusReplicasStatus(its, replicas, true, true)).Should(Succeed()) - // - // its.Annotations[constant.KubeBlocksGenerationKey] = "2" - // its.Spec.Replicas = ptr.To[int32](5) - // newReplicas := []string{"test-cluster-its-3", "test-cluster-its-4"} - // Expect(NewReplicasStatus(its, newReplicas, true, true)).Should(Succeed()) - // - // cli := testutil.NewK8sMockClient() - // cli.MockGetMethod(testutil.WithGetReturned(func(key client.ObjectKey, obj client.Object) error { - // // TODO: mock - // return fmt.Errorf("not found") - // }, testutil.WithAnyTimes())) - // cli.MockUpdateMethod(testutil.WithSucceed(testutil.WithAnyTimes())) - // event := proto.TaskEvent{ - // Instance: "test-cluster-its", - // Replica: "test-cluster-its-3", - // EndTime: time.Now(), - // Code: 0, - // } - // Expect(handleNewReplicaTaskEvent(logger, testCtx.Ctx, cli.Client(), testCtx.DefaultNamespace, event)).Should(Succeed()) - // - // status, err := getReplicasStatus(its) - // Expect(err).Should(BeNil()) - // for _, s := range status.Status { - // if s.Name == "test-cluster-its-3" { - // 
Expect(s.Provisioned).Should(BeTrue()) // provisioned - // Expect(s.DataLoaded).ShouldNot(BeNil()) - // Expect(*s.DataLoaded).Should(BeTrue()) // loaded - // Expect(s.MemberJoined).ShouldNot(BeNil()) - // Expect(*s.MemberJoined).Should(BeFalse()) // not joined - // } - // } - // }) - // - // It("task event for new replicas - failed", func() { - // Expect(StatusReplicasStatus(its, replicas, true, true)).Should(Succeed()) - // - // its.Annotations[constant.KubeBlocksGenerationKey] = "2" - // its.Spec.Replicas = ptr.To[int32](5) - // newReplicas := []string{"test-cluster-its-3", "test-cluster-its-4"} - // Expect(NewReplicasStatus(its, newReplicas, true, true)).Should(Succeed()) - // - // cli := testutil.NewK8sMockClient() - // cli.MockGetMethod(testutil.WithGetReturned(func(key client.ObjectKey, obj client.Object) error { - // // TODO: mock - // return fmt.Errorf("not found") - // }, testutil.WithAnyTimes())) - // cli.MockUpdateMethod(testutil.WithSucceed(testutil.WithAnyTimes())) - // event := proto.TaskEvent{ - // Instance: "test-cluster-its", - // Replica: "test-cluster-its-3", - // EndTime: time.Now(), - // Code: -1, - // Message: "failed", - // } - // Expect(handleNewReplicaTaskEvent(logger, testCtx.Ctx, cli.Client(), testCtx.DefaultNamespace, event)).Should(Succeed()) - // - // status, err := getReplicasStatus(its) - // Expect(err).Should(BeNil()) - // for _, s := range status.Status { - // if s.Name == "test-cluster-its-3" { - // Expect(s.Provisioned).Should(BeTrue()) // provisioned - // Expect(s.DataLoaded).ShouldNot(BeNil()) - // Expect(*s.DataLoaded).Should(BeFalse()) // not loaded - // Expect(s.MemberJoined).ShouldNot(BeNil()) - // Expect(*s.MemberJoined).Should(BeFalse()) // not joined - // Expect(s.Message).Should(Equal("failed")) - // } - // } - // }) - // - // It("task event for new replicas - in progress", func() { - // Expect(StatusReplicasStatus(its, replicas, true, true)).Should(Succeed()) - // - // its.Annotations[constant.KubeBlocksGenerationKey] 
= "2" - // its.Spec.Replicas = ptr.To[int32](5) - // newReplicas := []string{"test-cluster-its-3", "test-cluster-its-4"} - // Expect(NewReplicasStatus(its, newReplicas, true, true)).Should(Succeed()) - // - // cli := testutil.NewK8sMockClient() - // cli.MockGetMethod(testutil.WithGetReturned(func(key client.ObjectKey, obj client.Object) error { - // // TODO: mock - // return fmt.Errorf("not found") - // }, testutil.WithAnyTimes())) - // cli.MockUpdateMethod(testutil.WithSucceed(testutil.WithAnyTimes())) - // event := proto.TaskEvent{ - // Instance: "test-cluster-its", - // Replica: "test-cluster-its-3", - // // EndTime: time.Now(), - // Code: 0, - // Message: "90", - // } - // Expect(handleNewReplicaTaskEvent(logger, testCtx.Ctx, cli.Client(), testCtx.DefaultNamespace, event)).Should(Succeed()) - // - // status, err := getReplicasStatus(its) - // Expect(err).Should(BeNil()) - // for _, s := range status.Status { - // if s.Name == "test-cluster-its-3" { - // Expect(s.Provisioned).Should(BeTrue()) // provisioned - // Expect(s.DataLoaded).ShouldNot(BeNil()) - // Expect(*s.DataLoaded).Should(BeFalse()) // not loaded - // Expect(s.MemberJoined).ShouldNot(BeNil()) - // Expect(*s.MemberJoined).Should(BeFalse()) // not joined - // Expect(s.Message).Should(Equal("90")) - // } - // } - // }) - }) -}) diff --git a/pkg/controller/component/utils.go b/pkg/controller/component/utils.go index 0f2f1d6b8da..a9daa2e23da 100644 --- a/pkg/controller/component/utils.go +++ b/pkg/controller/component/utils.go @@ -41,10 +41,6 @@ import ( intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) -func inDataContext() *multicluster.ClientOption { - return multicluster.InDataContext() -} - func ValidateDefNameRegexp(defNamePattern string) error { _, err := regexp.Compile(defNamePattern) return err diff --git a/pkg/controller/component/vars.go b/pkg/controller/component/vars.go index bf95518a8fc..21706623f35 100644 --- a/pkg/controller/component/vars.go +++ 
b/pkg/controller/component/vars.go @@ -944,7 +944,7 @@ func clusterServiceGetter(ctx context.Context, cli client.Reader, namespace, clu Name: constant.GenerateClusterServiceName(clusterName, name), } obj := &corev1.Service{} - err := cli.Get(ctx, key, obj, inDataContext()) // TODO: cluster service + err := cli.Get(ctx, key, obj) // TODO: cluster service return &resolvedServiceObj{service: obj}, err } @@ -975,7 +975,7 @@ func compServiceGetter(ctx context.Context, cli client.Reader, namespace, cluste Name: svcName, } obj := &corev1.Service{} - err = cli.Get(ctx, key, obj, inDataContext()) // TODO: cmp service + err = cli.Get(ctx, key, obj) // TODO: cmp service if err == nil { return &resolvedServiceObj{service: obj}, nil } @@ -986,7 +986,7 @@ func compServiceGetter(ctx context.Context, cli client.Reader, namespace, cluste // fall-back to list services and find the matched prefix svcList := &corev1.ServiceList{} matchingLabels := client.MatchingLabels(constant.GetCompLabels(clusterName, compName)) - err = cli.List(ctx, svcList, matchingLabels, inDataContext()) // TODO: cmp service + err = cli.List(ctx, svcList, matchingLabels) // TODO: cmp service if err != nil { return nil, err } @@ -1241,22 +1241,22 @@ func componentVarPodsGetter(ctx context.Context, cli client.Reader, // TODO: what if the component is being deleted? 
} - its := &workloadsv1.InstanceSet{} - itsKey := types.NamespacedName{ - Namespace: namespace, - Name: constant.GenerateWorkloadNamePattern(clusterName, compName), - } - err := cli.Get(ctx, itsKey, its) - if err != nil && !apierrors.IsNotFound(err) { - return "", err - } - - var names []string - if err == nil { - names, err = GeneratePodNamesByITS(its) - } else { - names, err = GeneratePodNamesByComp(comp) - } + // its := &workloadsv1.InstanceSet{} + // itsKey := types.NamespacedName{ + // Namespace: namespace, + // Name: constant.GenerateWorkloadNamePattern(clusterName, compName), + // } + // err := cli.Get(ctx, itsKey, its) + // if err != nil && !apierrors.IsNotFound(err) { + // return "", err + // } + + // var names []string + // if err == nil { + // names, err = GeneratePodNamesByITS(its) + // } else { + names, err := GeneratePodNamesByComp(comp) + // } if err != nil { return "", err } diff --git a/pkg/controller/component/workload_utils.go b/pkg/controller/component/workload_utils.go index 5b4701c4b42..a66e45ff99a 100644 --- a/pkg/controller/component/workload_utils.go +++ b/pkg/controller/component/workload_utils.go @@ -22,7 +22,6 @@ package component import ( "context" "fmt" - "maps" "reflect" "strconv" "strings" @@ -43,17 +42,6 @@ func ListOwnedWorkloads(ctx context.Context, cli client.Reader, namespace, clust return listWorkloads(ctx, cli, namespace, clusterName, compName) } -func ListOwnedPods(ctx context.Context, cli client.Reader, namespace, clusterName, compName string, - opts ...client.ListOption) ([]*corev1.Pod, error) { - return listPods(ctx, cli, namespace, clusterName, compName, nil, opts...) -} - -func ListOwnedPodsWithRole(ctx context.Context, cli client.Reader, namespace, clusterName, compName, role string, - opts ...client.ListOption) ([]*corev1.Pod, error) { - roleLabel := map[string]string{constant.RoleLabelKey: role} - return listPods(ctx, cli, namespace, clusterName, compName, roleLabel, opts...) 
-} - func ListOwnedServices(ctx context.Context, cli client.Reader, namespace, clusterName, compName string, opts ...client.ListOption) ([]*corev1.Service, error) { labels := constant.GetCompLabels(clusterName, compName) @@ -79,20 +67,6 @@ func listWorkloads(ctx context.Context, cli client.Reader, namespace, clusterNam return listObjWithLabelsInNamespace(ctx, cli, generics.InstanceSetSignature, namespace, labels) } -func listPods(ctx context.Context, cli client.Reader, namespace, clusterName, compName string, - labels map[string]string, opts ...client.ListOption) ([]*corev1.Pod, error) { - if labels == nil { - labels = constant.GetCompLabels(clusterName, compName) - } else { - maps.Copy(labels, constant.GetCompLabels(clusterName, compName)) - } - if opts == nil { - opts = make([]client.ListOption, 0) - } - opts = append(opts, inDataContext()) // TODO: pod - return listObjWithLabelsInNamespace(ctx, cli, generics.PodSignature, namespace, labels, opts...) -} - func listObjWithLabelsInNamespace[T generics.Object, PT generics.PObject[T], L generics.ObjList[T], PL generics.PObjList[T, L]]( ctx context.Context, cli client.Reader, _ func(T, PT, L, PL), namespace string, labels client.MatchingLabels, opts ...client.ListOption) ([]PT, error) { if opts == nil { diff --git a/pkg/controller/instance/reconciler_assistant_object.go b/pkg/controller/instance/reconciler_assistant_object.go index 47349bf70fd..0c1c0b0c376 100644 --- a/pkg/controller/instance/reconciler_assistant_object.go +++ b/pkg/controller/instance/reconciler_assistant_object.go @@ -30,7 +30,6 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" "github.com/apecloud/kubeblocks/pkg/constant" @@ -71,27 +70,17 @@ func (r *assistantObjectReconciler) createOrUpdate(tree *kubebuilderx.ObjectTree if obj == nil { return nil // skip 
the object } + r.withInstAnnotationsNLabels(inst, obj) + robj, err := tree.Get(obj) if err != nil && !errors.IsNotFound(err) { return err } + if err != nil || robj == nil { - labels := obj.GetLabels() - if labels == nil { - labels = getMatchLabels(inst.Name) - } else { - maps.Copy(labels, getMatchLabels(inst.Name)) - } - obj.SetLabels(labels) - if err := controllerutil.SetControllerReference(inst, obj, model.GetScheme()); err != nil { - return err - } - return tree.Add(obj) - } - if merged := r.copyAndMerge(assistantObj, robj, obj); merged != nil { - return tree.Update(merged) + return r.create(tree, inst, obj) } - return nil + return r.update(tree, assistantObj, robj, obj) } func (r *assistantObjectReconciler) instanceAssistantObject(obj workloads.InstanceAssistantObject) client.Object { @@ -133,9 +122,55 @@ func (r *assistantObjectReconciler) checkObjectProvisionPolicy(inst *workloads.I return nil } +func (r *assistantObjectReconciler) withInstAnnotationsNLabels(inst *workloads.Instance, obj client.Object) { + annotations := obj.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[constant.KubeBlocksGenerationKey] = inst.Annotations[constant.KubeBlocksGenerationKey] + obj.SetAnnotations(annotations) + + labels := obj.GetLabels() + if labels == nil { + labels = getMatchLabels(inst.Name) + } else { + maps.Copy(labels, getMatchLabels(inst.Name)) + } + obj.SetLabels(labels) +} + +func (r *assistantObjectReconciler) create(tree *kubebuilderx.ObjectTree, inst *workloads.Instance, obj client.Object) error { + // TODO: shared assistant objects + // if err := controllerutil.SetControllerReference(inst, obj, model.GetScheme()); err != nil { + // return err + // } + return tree.Add(obj) +} + +func (r *assistantObjectReconciler) update(tree *kubebuilderx.ObjectTree, assistantObj workloads.InstanceAssistantObject, robj, obj client.Object) error { + ng, og := r.generation(obj), r.generation(robj) + if ng > 0 && og > 0 && ng < og { + 
return nil + } + merged := r.copyAndMerge(assistantObj, robj, obj) + if merged == nil { + return nil + } + return tree.Update(merged) +} + +func (r *assistantObjectReconciler) generation(obj client.Object) int64 { + g := int64(-1) + s := obj.GetAnnotations()[constant.KubeBlocksGenerationKey] + if len(s) > 0 { + g, _ = strconv.ParseInt(s, 10, 64) + } + return g +} + func (r *assistantObjectReconciler) copyAndMerge(obj workloads.InstanceAssistantObject, oldObj, newObj client.Object) client.Object { service := func() client.Object { - return copyAndMergeAssistantObject(oldObj, newObj, + return r.copyAndMergeAssistantObject(oldObj, newObj, func(o, n client.Object) bool { o1 := o.(*corev1.Service) n1 := n.(*corev1.Service) @@ -154,7 +189,7 @@ func (r *assistantObjectReconciler) copyAndMerge(obj workloads.InstanceAssistant }) } cm := func() client.Object { - return copyAndMergeAssistantObject(oldObj, newObj, + return r.copyAndMergeAssistantObject(oldObj, newObj, func(o, n client.Object) bool { return reflect.DeepEqual(o.(*corev1.ConfigMap).Data, n.(*corev1.ConfigMap).Data) }, @@ -163,7 +198,7 @@ func (r *assistantObjectReconciler) copyAndMerge(obj workloads.InstanceAssistant }) } secret := func() client.Object { - return copyAndMergeAssistantObject(oldObj, newObj, + return r.copyAndMergeAssistantObject(oldObj, newObj, func(o, n client.Object) bool { return reflect.DeepEqual(o.(*corev1.Secret).Data, n.(*corev1.Secret).Data) }, @@ -172,7 +207,7 @@ func (r *assistantObjectReconciler) copyAndMerge(obj workloads.InstanceAssistant }) } sa := func() client.Object { - return copyAndMergeAssistantObject(oldObj, newObj, + return r.copyAndMergeAssistantObject(oldObj, newObj, func(o, n client.Object) bool { return reflect.DeepEqual(o.(*corev1.ServiceAccount).Secrets, n.(*corev1.ServiceAccount).Secrets) }, @@ -181,7 +216,7 @@ func (r *assistantObjectReconciler) copyAndMerge(obj workloads.InstanceAssistant }) } role := func() client.Object { - return 
copyAndMergeAssistantObject(oldObj, newObj, + return r.copyAndMergeAssistantObject(oldObj, newObj, func(o, n client.Object) bool { return reflect.DeepEqual(o.(*rbacv1.Role).Rules, n.(*rbacv1.Role).Rules) }, @@ -190,7 +225,7 @@ func (r *assistantObjectReconciler) copyAndMerge(obj workloads.InstanceAssistant }) } roleBinding := func() client.Object { - return copyAndMergeAssistantObject(oldObj, newObj, + return r.copyAndMergeAssistantObject(oldObj, newObj, func(o, n client.Object) bool { o1 := o.(*rbacv1.RoleBinding) n1 := n.(*rbacv1.RoleBinding) @@ -221,7 +256,7 @@ func (r *assistantObjectReconciler) copyAndMerge(obj workloads.InstanceAssistant return roleBinding() } -func copyAndMergeAssistantObject(oldObj, newObj client.Object, equal func(o, n client.Object) bool, set func(o, n client.Object)) client.Object { +func (r *assistantObjectReconciler) copyAndMergeAssistantObject(oldObj, newObj client.Object, equal func(o, n client.Object) bool, set func(o, n client.Object)) client.Object { if reflect.DeepEqual(oldObj.GetLabels(), newObj.GetLabels()) && reflect.DeepEqual(oldObj.GetAnnotations(), newObj.GetAnnotations()) && equal(oldObj, newObj) { diff --git a/pkg/controller/instance/reconciler_deletion.go b/pkg/controller/instance/reconciler_deletion.go index 19e5cab89b6..0e7e54ea67d 100644 --- a/pkg/controller/instance/reconciler_deletion.go +++ b/pkg/controller/instance/reconciler_deletion.go @@ -20,6 +20,7 @@ along with this program. If not, see . 
package instance import ( + "fmt" "maps" corev1 "k8s.io/api/core/v1" @@ -58,6 +59,11 @@ func (r *deletionReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuild retainPVC = pvcRetentionPolicy != nil && pvcRetentionPolicy.WhenScaled == appsv1.RetainPersistentVolumeClaimRetentionPolicyType } + // call the lifecycle action before delete resources + if call, err := r.lifecycleDeleteInstance(tree, inst); call || err != nil { + return kubebuilderx.Continue, err + } + // delete secondary objects first if has, err := r.deleteSecondaryObjects(tree, retainPVC); has { return kubebuilderx.Continue, err @@ -68,6 +74,22 @@ func (r *deletionReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuild return kubebuilderx.Continue, nil } +func (r *deletionReconciler) lifecycleDeleteInstance(tree *kubebuilderx.ObjectTree, inst *workloads.Instance) (bool, error) { + pods := tree.List(&corev1.Pod{}) + if len(pods) == 0 && ptr.Deref(inst.Status.MemberJoined, false) { + return false, fmt.Errorf("there is no pod to call the member-leave action") + } + + if len(pods) > 0 && ptr.Deref(inst.Status.MemberJoined, false) { + if err := lifecycleDeleteInstance(tree, inst, pods[0].(*corev1.Pod)); err != nil { + return false, err + } + inst.Status.MemberJoined = ptr.To(false) + return true, nil + } + return false, nil +} + func (r *deletionReconciler) deleteSecondaryObjects(tree *kubebuilderx.ObjectTree, retainPVC bool) (bool, error) { // secondary objects to be deleted secondaryObjects := maps.Clone(tree.GetSecondaryObjects()) diff --git a/pkg/controller/instance/reconciler_membership.go b/pkg/controller/instance/reconciler_membership.go new file mode 100644 index 00000000000..d7343e3d02e --- /dev/null +++ b/pkg/controller/instance/reconciler_membership.go @@ -0,0 +1,165 @@ +/* +Copyright (C) 2022-2025 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public 
License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package instance + +import ( + "errors" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" + "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" + "github.com/apecloud/kubeblocks/pkg/controller/model" +) + +func NewMembershipReconciler() kubebuilderx.Reconciler { + return &membershipReconciler{} +} + +type membershipReconciler struct{} + +var _ kubebuilderx.Reconciler = &membershipReconciler{} + +func (r *membershipReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult { + if tree.GetRoot() == nil || model.IsObjectDeleting(tree.GetRoot()) { + return kubebuilderx.ConditionUnsatisfied + } + if model.IsReconciliationPaused(tree.GetRoot()) { + return kubebuilderx.ConditionUnsatisfied + } + return kubebuilderx.ConditionSatisfied +} + +func (r *membershipReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilderx.Result, error) { + inst, _ := tree.GetRoot().(*workloads.Instance) + pods := tree.List(&corev1.Pod{}) + + if !inst.Status.Provisioned { + if len(pods) == 0 { + return kubebuilderx.Continue, nil // wait provision + } else { + inst.Status.Provisioned = true + inst.Status.DataLoaded = shouldLoadData(inst) + inst.Status.MemberJoined = shouldJoinMember(inst) + } + } + + var err error + if len(pods) > 0 { + err = lifecycleCreateInstance(tree, inst, pods[0].(*corev1.Pod)) + } + return kubebuilderx.Continue, err +} + 
+func lifecycleCreateInstance(tree *kubebuilderx.ObjectTree, inst *workloads.Instance, pod *corev1.Pod) error { + if !inst.Status.Provisioned { + return nil + } + if inst.Status.DataLoaded != nil && !*inst.Status.DataLoaded { + return nil // loading + } + if inst.Status.MemberJoined == nil || *inst.Status.MemberJoined { + return nil // not defined or joined + } + if err := lifecycleJoinMember(tree, inst, pod); err != nil { + tree.Logger.Info("failed to join member", "error", err.Error()) + } else { + inst.Status.MemberJoined = ptr.To(true) + } + return nil +} + +func lifecycleJoinMember(tree *kubebuilderx.ObjectTree, inst *workloads.Instance, pod *corev1.Pod) error { + lfa, err := newLifecycleAction(inst, pod) + if err != nil { + return err + } + if err = lfa.MemberJoin(tree.Context, tree.Reader, nil); err != nil { + if !errors.Is(err, lifecycle.ErrActionNotDefined) { + return err + } + } + tree.Logger.Info("succeed to call member join action") + return nil +} + +func lifecycleDeleteInstance(tree *kubebuilderx.ObjectTree, inst *workloads.Instance, pod *corev1.Pod) error { + if ptr.Deref(inst.Status.MemberJoined, false) { + if err := lifecycleLeaveMember(tree, inst, pod); err != nil { + return err + } + } + return nil +} + +func lifecycleLeaveMember(tree *kubebuilderx.ObjectTree, inst *workloads.Instance, pod *corev1.Pod) error { + switchover := func(lfa lifecycle.Lifecycle, pod *corev1.Pod) error { + if inst.Spec.LifecycleActions.Switchover == nil { + return nil + } + err := lfa.Switchover(tree.Context, tree.Reader, nil, "") + if err != nil { + if errors.Is(err, lifecycle.ErrActionNotDefined) { + return nil + } + return err + } + tree.Logger.Info("succeed to call switchover action before leave member") + return nil + } + + memberLeave := func(lfa lifecycle.Lifecycle, pod *corev1.Pod) error { + err := lfa.MemberLeave(tree.Context, tree.Reader, nil) + if err != nil { + if errors.Is(err, lifecycle.ErrActionNotDefined) { + return nil + } + return err + } + 
tree.Logger.Info("succeed to call leave member action") + return nil + } + + lfa, err := newLifecycleAction(inst, pod) + if err != nil { + return err + + } + if err = switchover(lfa, pod); err != nil { + tree.Logger.Error(err, "failed to call switchover action before leave member, ignore and continue") + } + return memberLeave(lfa, pod) +} + +func shouldLoadData(inst *workloads.Instance) *bool { + if inst.Spec.LifecycleActions != nil && inst.Spec.LifecycleActions.DataLoad != nil { + return ptr.To(false) + } + return nil +} + +func shouldJoinMember(inst *workloads.Instance) *bool { + if inst.Spec.LifecycleActions != nil && inst.Spec.LifecycleActions.MemberJoin != nil { + return ptr.To(false) + } + return nil +} diff --git a/pkg/controller/instance/reconciler_status.go b/pkg/controller/instance/reconciler_status.go index bfa44542c68..1f56ab6501e 100644 --- a/pkg/controller/instance/reconciler_status.go +++ b/pkg/controller/instance/reconciler_status.go @@ -27,6 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" "github.com/apecloud/kubeblocks/pkg/constant" @@ -65,11 +66,6 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder ready, available, updated := false, false, false notReadyName, notAvailableName := "", "" - // podToNodeMapping, err := ParseNodeSelectorOnceAnnotation(inst) - // if err != nil { - // return kubebuilderx.Continue, err - // } - if isCreated(pod) { notReadyName = pod.Name } @@ -89,16 +85,6 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder } } - // TODO: ??? 
- // if nodeName, ok := podToNodeMapping[pod.Name]; ok { - // // there's chance that a pod is currently running and wait to be deleted so that it can be rescheduled - // if pod.Spec.NodeName == nodeName { - // if err := deleteNodeSelectorOnceAnnotation(its, pod.Name); err != nil { - // return kubebuilderx.Continue, err - // } - // } - // } - inst.Status.CurrentRevision = getPodRevision(pod) if updated { inst.Status.CurrentRevision = inst.Status.UpdateRevision @@ -121,7 +107,8 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder inst.Status.Ready = ready inst.Status.Available = available inst.Status.Role = r.observedRoleOfPod(inst, pod) - inst.Status.VolumeExpansion = r.hasRunningVolumeExpansion(tree, inst) + r.buildLifecycleStatus(inst, pod) + inst.Status.InVolumeExpansion = r.hasRunningVolumeExpansion(tree, inst) if inst.Spec.MinReadySeconds > 0 && !available { return kubebuilderx.RetryAfter(time.Second), nil @@ -197,6 +184,25 @@ func (r *statusReconciler) observedRoleOfPod(inst *workloads.Instance, pod *core return "" } +func (r *statusReconciler) buildLifecycleStatus(inst *workloads.Instance, pod *corev1.Pod) { + dataLoaded := func() *bool { + if inst.Spec.LifecycleActions == nil || inst.Spec.LifecycleActions.DataLoad == nil { + return nil + } + if inst.Status.DataLoaded == nil || *inst.Status.DataLoaded { + return inst.Status.DataLoaded + } + loaded, ok := pod.Annotations[constant.LifeCycleDataLoadedAnnotationKey] + if !ok { + return ptr.To(false) + } + return ptr.To(strings.ToLower(loaded) == "true") + } + + inst.Status.Provisioned = true + inst.Status.DataLoaded = dataLoaded() +} + func (r *statusReconciler) hasRunningVolumeExpansion(tree *kubebuilderx.ObjectTree, inst *workloads.Instance) bool { pvcs := tree.List(&corev1.PersistentVolumeClaim{}) var pvcList []*corev1.PersistentVolumeClaim diff --git a/pkg/controller/instance/reconciler_update.go b/pkg/controller/instance/reconciler_update.go index 0f97ccdb78d..5b3dc01f3c5 
100644 --- a/pkg/controller/instance/reconciler_update.go +++ b/pkg/controller/instance/reconciler_update.go @@ -195,7 +195,7 @@ func (r *updateReconciler) switchover(tree *kubebuilderx.ObjectTree, inst *workl return nil } - lfa, err := newLifecycleAction(inst, nil, pod) + lfa, err := newLifecycleAction(inst, pod) if err != nil { return err } diff --git a/pkg/controller/instance/tree_loader.go b/pkg/controller/instance/tree_loader.go index 4202968f642..5b1bd3f87a7 100644 --- a/pkg/controller/instance/tree_loader.go +++ b/pkg/controller/instance/tree_loader.go @@ -21,6 +21,7 @@ package instance import ( "context" + "reflect" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -30,7 +31,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" + "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" + "github.com/apecloud/kubeblocks/pkg/controller/model" ) func NewTreeLoader() kubebuilderx.TreeLoader { @@ -43,12 +46,16 @@ var _ kubebuilderx.TreeLoader = &treeLoader{} func (r *treeLoader) Load(ctx context.Context, reader client.Reader, req ctrl.Request, recorder record.EventRecorder, logger logr.Logger) (*kubebuilderx.ObjectTree, error) { ml := getMatchLabels(req.Name) - kinds := ownedKinds() + kinds := r.ownedKinds() tree, err := kubebuilderx.ReadObjectTree[*workloads.Instance](ctx, reader, req, ml, kinds...) 
if err != nil { return nil, err } + if err = r.readAssociatedObjects(ctx, reader, req, tree); err != nil { + return nil, err + } + tree.Context = ctx tree.EventRecorder = recorder tree.Logger = logger @@ -58,10 +65,47 @@ func (r *treeLoader) Load(ctx context.Context, reader client.Reader, req ctrl.Re return tree, err } -func ownedKinds() []client.ObjectList { +func (r *treeLoader) readAssociatedObjects(ctx context.Context, reader client.Reader, req ctrl.Request, tree *kubebuilderx.ObjectTree) error { + root := tree.GetRoot() + if root != nil { + inNS := client.InNamespace(req.Namespace) + ml := client.MatchingLabels(map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: root.GetLabels()[constant.AppInstanceLabelKey], + constant.KBAppComponentLabelKey: root.GetLabels()[constant.KBAppComponentLabelKey], + }) + for _, list := range r.associatedObjectKinds() { + if err := reader.List(ctx, list, inNS, ml); err != nil { + return err + } + // reflect get list.Items + items := reflect.ValueOf(list).Elem().FieldByName("Items") + l := items.Len() + for i := 0; i < l; i++ { + // get the underlying object + object := items.Index(i).Addr().Interface().(client.Object) + if len(object.GetOwnerReferences()) > 0 && !model.IsOwnerOf(root, object) { + continue + } + if err := tree.Add(object); err != nil { + return err + } + } + } + } + return nil +} + +func (r *treeLoader) ownedKinds() []client.ObjectList { return []client.ObjectList{ &corev1.PodList{}, &corev1.PersistentVolumeClaimList{}, + } +} + +func (r *treeLoader) associatedObjectKinds() []client.ObjectList { + return []client.ObjectList{ + &corev1.ServiceList{}, &corev1.ConfigMapList{}, // config & script, env &corev1.SecretList{}, // account, tls &corev1.ServiceAccountList{}, diff --git a/pkg/controller/instance/utils.go b/pkg/controller/instance/utils.go index eefb2def144..a9885a6a4a4 100644 --- a/pkg/controller/instance/utils.go +++ b/pkg/controller/instance/utils.go @@ -20,6 
+20,7 @@ along with this program. If not, see . package instance import ( + "fmt" "reflect" "slices" "strings" @@ -36,6 +37,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" "github.com/apecloud/kubeblocks/pkg/controller/model" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" + "github.com/apecloud/kubeblocks/pkg/kbagent" ) func podName(inst *workloads.Instance) string { @@ -351,19 +353,45 @@ func copyAndMerge(oldObj, newObj client.Object) client.Object { } } -func newLifecycleAction(inst *workloads.Instance, objects []client.Object, pod *corev1.Pod) (lifecycle.Lifecycle, error) { +func newLifecycleAction(inst *workloads.Instance, pod *corev1.Pod) (lifecycle.Lifecycle, error) { var ( clusterName = inst.Labels[constant.AppInstanceLabelKey] compName = inst.Labels[constant.KBAppComponentLabelKey] lifecycleActions = &kbappsv1.ComponentLifecycleActions{ Switchover: inst.Spec.LifecycleActions.Switchover, + MemberJoin: inst.Spec.LifecycleActions.MemberJoin, + MemberLeave: inst.Spec.LifecycleActions.MemberLeave, Reconfigure: inst.Spec.LifecycleActions.Reconfigure, } - pods []*corev1.Pod + replica = &lifecycleReplica{ + Pod: *pod, + } ) - for i := range objects { - pods = append(pods, objects[i].(*corev1.Pod)) - } return lifecycle.New(inst.Namespace, clusterName, compName, - lifecycleActions, inst.Spec.LifecycleActions.TemplateVars, pod, pods...) 
+ lifecycleActions, inst.Spec.LifecycleActions.TemplateVars, replica) +} + +type lifecycleReplica struct { + corev1.Pod +} + +func (r *lifecycleReplica) Namespace() string { + return r.ObjectMeta.Namespace +} + +func (r *lifecycleReplica) Name() string { + return r.ObjectMeta.Name +} + +func (r *lifecycleReplica) Role() string { + return r.ObjectMeta.Labels[constant.RoleLabelKey] +} + +func (r *lifecycleReplica) Endpoint() (string, int32, error) { + port, err := intctrlutil.GetPortByName(r.Pod, kbagent.ContainerName, kbagent.DefaultHTTPPortName) + return r.Status.PodIP, port, err +} + +func (r *lifecycleReplica) StreamingEndpoint() (string, int32, error) { + return "", 0, fmt.Errorf("NotSupported") } diff --git a/pkg/controller/instanceset/in_place_update_util_test.go b/pkg/controller/instanceset/in_place_update_util_test.go index 872583c9f70..c6ad4b97a9a 100644 --- a/pkg/controller/instanceset/in_place_update_util_test.go +++ b/pkg/controller/instanceset/in_place_update_util_test.go @@ -133,7 +133,7 @@ var _ = Describe("instance util test", func() { Expect(res).Should(Equal(kubebuilderx.Continue)) By("replicas alignment") - reconciler = NewReplicasAlignmentReconciler() + reconciler = NewInstanceAlignmentReconciler() res, err = reconciler.Reconcile(tree) Expect(err).Should(BeNil()) Expect(res).Should(Equal(kubebuilderx.Continue)) diff --git a/pkg/controller/instanceset/reconciler_instance_alignment.go b/pkg/controller/instanceset/reconciler_instance_alignment.go index a98e1553096..46f3d394297 100644 --- a/pkg/controller/instanceset/reconciler_instance_alignment.go +++ b/pkg/controller/instanceset/reconciler_instance_alignment.go @@ -23,6 +23,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" @@ -33,16 +34,16 @@ import ( intctrlutil 
"github.com/apecloud/kubeblocks/pkg/controllerutil" ) +func NewInstanceAlignmentReconciler() kubebuilderx.Reconciler { + return &instanceAlignmentReconciler{} +} + // instanceAlignmentReconciler is responsible for aligning the actual instances(pods) with the desired replicas specified in the spec, // including horizontal scaling and recovering from unintended pod deletions etc. // only handle instance count, don't care instance revision. -// -// TODO(free6om): support membership reconfiguration type instanceAlignmentReconciler struct{} -func NewReplicasAlignmentReconciler() kubebuilderx.Reconciler { - return &instanceAlignmentReconciler{} -} +var _ kubebuilderx.Reconciler = &instanceAlignmentReconciler{} func (r *instanceAlignmentReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult { if tree.GetRoot() == nil || model.IsObjectDeleting(tree.GetRoot()) { @@ -140,7 +141,7 @@ func (r *instanceAlignmentReconciler) Reconcile(tree *kubebuilderx.ObjectTree) ( if err != nil { return kubebuilderx.Continue, err } - if err := tree.Add(newPod); err != nil { + if err := tree.AddWithOption(newPod, r.createInstance(tree, its, oldInstanceList, newPod)); err != nil { return kubebuilderx.Continue, err } currentAlignedNameList = append(currentAlignedNameList, name) @@ -193,7 +194,7 @@ func (r *instanceAlignmentReconciler) Reconcile(tree *kubebuilderx.ObjectTree) ( its.Name, pod.Name) } - if err := tree.Delete(pod); err != nil { + if err := tree.DeleteWithOption(pod, r.deleteInstance(tree, its, oldInstanceList, pod)); err != nil { return kubebuilderx.Continue, err } @@ -219,4 +220,16 @@ func (r *instanceAlignmentReconciler) Reconcile(tree *kubebuilderx.ObjectTree) ( return kubebuilderx.Continue, nil } -var _ kubebuilderx.Reconciler = &instanceAlignmentReconciler{} +func (r *instanceAlignmentReconciler) createInstance(tree *kubebuilderx.ObjectTree, + its *workloads.InstanceSet, pods []client.Object, pod *corev1.Pod) kubebuilderx.WithPostHook { + return 
func(obj client.Object) error { + return lifecycleCreateInstance(tree, its, pods, obj.(*corev1.Pod)) + } +} + +func (r *instanceAlignmentReconciler) deleteInstance(tree *kubebuilderx.ObjectTree, + its *workloads.InstanceSet, pods []client.Object, pod *corev1.Pod) kubebuilderx.WithPrevHook { + return func(obj client.Object) error { + return lifecycleDeleteInstance(tree, its, pods, pod) + } +} diff --git a/pkg/controller/instanceset/reconciler_instance_alignment_test.go b/pkg/controller/instanceset/reconciler_instance_alignment_test.go index f23e281aeba..0ffc8162ceb 100644 --- a/pkg/controller/instanceset/reconciler_instance_alignment_test.go +++ b/pkg/controller/instanceset/reconciler_instance_alignment_test.go @@ -52,7 +52,7 @@ var _ = Describe("replicas alignment reconciler test", func() { its.Generation = 1 tree := kubebuilderx.NewObjectTree() tree.SetRoot(its) - reconciler = NewReplicasAlignmentReconciler() + reconciler = NewInstanceAlignmentReconciler() Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ConditionSatisfied)) By("prepare current tree") diff --git a/pkg/controller/instanceset/reconciler_membership.go b/pkg/controller/instanceset/reconciler_membership.go new file mode 100644 index 00000000000..254a6a1cd4e --- /dev/null +++ b/pkg/controller/instanceset/reconciler_membership.go @@ -0,0 +1,214 @@ +/* +Copyright (C) 2022-2025 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package instanceset + +import ( + "errors" + "slices" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" + "github.com/apecloud/kubeblocks/pkg/controller/instancetemplate" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" + "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" + "github.com/apecloud/kubeblocks/pkg/controller/model" +) + +func NewMembershipReconciler() kubebuilderx.Reconciler { + return &membershipReconciler{} +} + +type membershipReconciler struct{} + +var _ kubebuilderx.Reconciler = &membershipReconciler{} + +func (r *membershipReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult { + if tree.GetRoot() == nil || model.IsObjectDeleting(tree.GetRoot()) { + return kubebuilderx.ConditionUnsatisfied + } + if model.IsReconciliationPaused(tree.GetRoot()) { + return kubebuilderx.ConditionUnsatisfied + } + return kubebuilderx.ConditionSatisfied +} + +func (r *membershipReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilderx.Result, error) { + its, _ := tree.GetRoot().(*workloads.InstanceSet) + itsExt, err := instancetemplate.BuildInstanceSetExt(its, tree) + if err != nil { + return kubebuilderx.Continue, err + } + + nameBuilder, err := instancetemplate.NewPodNameBuilder( + itsExt, &instancetemplate.PodNameBuilderOpts{EventLogger: tree.EventRecorder}, + ) + if err != nil { + return kubebuilderx.Continue, err + } + nameToTemplateMap, err := nameBuilder.BuildInstanceName2TemplateMap() + if err != nil { + return kubebuilderx.Continue, err + } + + newNameSet := sets.New[string]() + for name := range nameToTemplateMap { + newNameSet.Insert(name) + } + oldNameSet := sets.New[string]() + pods := tree.List(&corev1.Pod{}) + for _, pod := 
range pods { + oldNameSet.Insert(pod.GetName()) + } + + for _, pod := range pods { + if newNameSet.Has(pod.GetName()) { + if err = lifecycleCreateInstance(tree, its, pods, pod.(*corev1.Pod)); err != nil { + return kubebuilderx.Continue, err + } + } + } + + its.Status.InstanceStatus = slices.DeleteFunc(its.Status.InstanceStatus, func(inst workloads.InstanceStatus) bool { + // The pod has been deleted, but the subsequent update of ITS status failed. Remove it from InstanceStatus directly. + return !newNameSet.Has(inst.PodName) && !oldNameSet.Has(inst.PodName) + }) + + return kubebuilderx.Continue, nil +} + +func lifecycleCreateInstance(tree *kubebuilderx.ObjectTree, its *workloads.InstanceSet, pods []client.Object, pod *corev1.Pod) error { + idx := slices.IndexFunc(its.Status.InstanceStatus, func(inst workloads.InstanceStatus) bool { + return inst.PodName == pod.Name + }) + if idx < 0 { + its.Status.InstanceStatus = append(its.Status.InstanceStatus, workloads.InstanceStatus{ + PodName: pod.Name, + Provisioned: true, + DataLoaded: shouldLoadData(its), + MemberJoined: shouldJoinMember(its), + }) + idx = len(its.Status.InstanceStatus) - 1 + } + + inst := its.Status.InstanceStatus[idx] + if !inst.Provisioned { + return nil + } + if inst.DataLoaded != nil && !*inst.DataLoaded { + return nil // loading + } + if inst.MemberJoined == nil || *inst.MemberJoined { + return nil // not defined or joined + } + if err := lifecycleJoinMember(tree, its, pods, pod); err != nil { + tree.Logger.Info("failed to join member", "pod", pod.Name, "error", err.Error()) + } else { + its.Status.InstanceStatus[idx].MemberJoined = ptr.To(true) + } + return nil +} + +func lifecycleJoinMember(tree *kubebuilderx.ObjectTree, its *workloads.InstanceSet, pods []client.Object, pod *corev1.Pod) error { + lfa, err := newLifecycleAction(its, pods, pod) + if err != nil { + return err + } + if err = lfa.MemberJoin(tree.Context, tree.Reader, nil); err != nil { + if !errors.Is(err, 
lifecycle.ErrActionNotDefined) { + return err + } + } + tree.Logger.Info("succeed to call member join action", "pod", pod.Name) + return nil +} + +func lifecycleDeleteInstance(tree *kubebuilderx.ObjectTree, its *workloads.InstanceSet, pods []client.Object, pod *corev1.Pod) error { + idx := slices.IndexFunc(its.Status.InstanceStatus, func(inst workloads.InstanceStatus) bool { + return inst.PodName == pod.Name + }) + if idx < 0 { + return nil + } + inst := its.Status.InstanceStatus[idx] + if ptr.Deref(inst.MemberJoined, false) { + if err := lifecycleLeaveMember(tree, its, pods, pod); err != nil { + return err + } + } + its.Status.InstanceStatus = slices.Delete(its.Status.InstanceStatus, idx, idx+1) + return nil +} + +func lifecycleLeaveMember(tree *kubebuilderx.ObjectTree, its *workloads.InstanceSet, pods []client.Object, pod *corev1.Pod) error { + switchover := func(lfa lifecycle.Lifecycle, pod *corev1.Pod) error { + if its.Spec.LifecycleActions.Switchover == nil { + return nil + } + err := lfa.Switchover(tree.Context, tree.Reader, nil, "") + if err != nil { + if errors.Is(err, lifecycle.ErrActionNotDefined) { + return nil + } + return err + } + tree.Logger.Info("succeed to call switchover action before leave member", "pod", pod.Name) + return nil + } + + memberLeave := func(lfa lifecycle.Lifecycle, pod *corev1.Pod) error { + err := lfa.MemberLeave(tree.Context, tree.Reader, nil) + if err != nil { + if errors.Is(err, lifecycle.ErrActionNotDefined) { + return nil + } + return err + } + tree.Logger.Info("succeed to call leave member action", "pod", pod.Name) + return nil + } + + lfa, err := newLifecycleAction(its, pods, pod) + if err != nil { + return err + + } + if err = switchover(lfa, pod); err != nil { + tree.Logger.Error(err, "failed to call switchover action before leave member, ignore and continue", "pod", pod.Name) + } + return memberLeave(lfa, pod) +} + +func shouldLoadData(its *workloads.InstanceSet) *bool { + if its.Spec.LifecycleActions != nil && 
its.Spec.LifecycleActions.DataLoad != nil { + return ptr.To(its.IsInInitializing()) + } + return nil +} + +func shouldJoinMember(its *workloads.InstanceSet) *bool { + if its.Spec.LifecycleActions != nil && its.Spec.LifecycleActions.MemberJoin != nil { + return ptr.To(its.IsInInitializing()) + } + return nil +} diff --git a/pkg/controller/instanceset/reconciler_revision_update.go b/pkg/controller/instanceset/reconciler_revision_update.go index 081f6b0f135..2ac1ed936ab 100644 --- a/pkg/controller/instanceset/reconciler_revision_update.go +++ b/pkg/controller/instanceset/reconciler_revision_update.go @@ -21,6 +21,7 @@ package instanceset import ( corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" @@ -29,18 +30,20 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/model" ) +func NewRevisionUpdateReconciler() kubebuilderx.Reconciler { + return &revisionUpdateReconciler{} +} + // revisionUpdateReconciler is responsible for updating the expected instance names and their corresponding revisions in the status when there are changes in the spec. 
type revisionUpdateReconciler struct{} +var _ kubebuilderx.Reconciler = &revisionUpdateReconciler{} + type instanceRevision struct { name string revision string } -func NewRevisionUpdateReconciler() kubebuilderx.Reconciler { - return &revisionUpdateReconciler{} -} - func (r *revisionUpdateReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult { if tree.GetRoot() == nil || !model.IsObjectUpdating(tree.GetRoot()) { return kubebuilderx.ConditionUnsatisfied @@ -89,11 +92,14 @@ func (r *revisionUpdateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kub updateRevision = instanceRevisionList[len(instanceRevisionList)-1].revision } its.Status.UpdateRevision = updateRevision - updatedReplicas, err := calculateUpdatedReplicas(its, tree.List(&corev1.Pod{})) + + updatedReplicas, err := r.calculateUpdatedReplicas(its, tree.List(&corev1.Pod{})) if err != nil { return kubebuilderx.Continue, err } its.Status.UpdatedReplicas = updatedReplicas + its.Status.InitReplicas = r.buildInitReplicas(its) + // The 'ObservedGeneration' field is used to indicate whether the revisions have been updated. // Computing these revisions in each reconciliation loop can be time-consuming, so we optimize it by // performing the computation only when the 'spec' is updated. 
@@ -102,7 +108,7 @@ func (r *revisionUpdateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kub return kubebuilderx.Continue, nil } -func calculateUpdatedReplicas(its *workloads.InstanceSet, pods []client.Object) (int32, error) { +func (r *revisionUpdateReconciler) calculateUpdatedReplicas(its *workloads.InstanceSet, pods []client.Object) (int32, error) { updatedReplicas := int32(0) for i := range pods { pod, _ := pods[i].(*corev1.Pod) @@ -113,9 +119,26 @@ func calculateUpdatedReplicas(its *workloads.InstanceSet, pods []client.Object) if updated { updatedReplicas++ } - } return updatedReplicas, nil } -var _ kubebuilderx.Reconciler = &revisionUpdateReconciler{} +func (r *revisionUpdateReconciler) buildInitReplicas(its *workloads.InstanceSet) *int32 { + initReplicas := its.Status.InitReplicas + if initReplicas == nil && ptr.Deref(its.Spec.Replicas, 0) > 0 { + initReplicas = its.Spec.Replicas + } + if initReplicas == nil { + return nil // the replicas is not set or set to 0 + } + + if *initReplicas != ptr.Deref(its.Status.ReadyInitReplicas, 0) { // in init phase + // in case the replicas is changed in the middle of init phase + if ptr.Deref(its.Spec.Replicas, 0) == 0 { + return nil + } else { + return its.Spec.Replicas + } + } + return initReplicas +} diff --git a/pkg/controller/instanceset/reconciler_status.go b/pkg/controller/instanceset/reconciler_status.go index fb71b4be368..77dcfc4960e 100644 --- a/pkg/controller/instanceset/reconciler_status.go +++ b/pkg/controller/instanceset/reconciler_status.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" "github.com/apecloud/kubeblocks/pkg/constant" @@ -40,15 +41,15 @@ import ( intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) +func NewStatusReconciler() kubebuilderx.Reconciler { + return &statusReconciler{} +} + // 
statusReconciler computes the current status type statusReconciler struct{} var _ kubebuilderx.Reconciler = &statusReconciler{} -func NewStatusReconciler() kubebuilderx.Reconciler { - return &statusReconciler{} -} - func (r *statusReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult { if tree.GetRoot() == nil || !model.IsObjectStatusUpdating(tree.GetRoot()) { return kubebuilderx.ConditionUnsatisfied @@ -147,6 +148,8 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder } } } + + its.Status.ReadyInitReplicas = r.buildReadyInitReplicas(its, readyReplicas) its.Status.Replicas = replicas its.Status.Ordinals = ordinals slices.Sort(its.Status.Ordinals) @@ -155,7 +158,7 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder its.Status.CurrentReplicas = currentReplicas its.Status.UpdatedReplicas = updatedReplicas its.Status.CurrentRevisions, _ = buildRevisions(currentRevisions) - its.Status.TemplatesStatus = buildTemplatesStatus(template2TemplatesStatus) + its.Status.TemplatesStatus = r.buildTemplatesStatus(template2TemplatesStatus) // all pods have been updated totalReplicas := int32(1) if its.Spec.Replicas != nil { @@ -172,20 +175,20 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder } } - readyCondition, err := buildReadyCondition(its, readyReplicas >= replicas, notReadyNames) + readyCondition, err := r.buildReadyCondition(its, readyReplicas >= replicas, notReadyNames) if err != nil { return kubebuilderx.Continue, err } meta.SetStatusCondition(&its.Status.Conditions, *readyCondition) - availableCondition, err := buildAvailableCondition(its, availableReplicas >= replicas, notAvailableNames) + availableCondition, err := r.buildAvailableCondition(its, availableReplicas >= replicas, notAvailableNames) if err != nil { return kubebuilderx.Continue, err } meta.SetStatusCondition(&its.Status.Conditions, *availableCondition) // 3. 
set InstanceFailure condition - failureCondition, err := buildFailureCondition(its, podList) + failureCondition, err := r.buildFailureCondition(its, podList) if err != nil { return kubebuilderx.Continue, err } @@ -195,8 +198,8 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder meta.RemoveStatusCondition(&its.Status.Conditions, string(workloads.InstanceFailure)) } - // 4. set instance status - setInstanceStatus(tree, its, podList) + // 4. build instance status + r.buildInstanceStatus(tree, its, podList) if its.Spec.MinReadySeconds > 0 && availableReplicas != readyReplicas { return kubebuilderx.RetryAfter(time.Second), nil @@ -204,14 +207,18 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder return kubebuilderx.Continue, nil } -func buildConditionMessageWithNames(podNames []string) ([]byte, error) { - baseSort(podNames, func(i int) (string, int) { - return parseParentNameAndOrdinal(podNames[i]) - }, nil, true) - return json.Marshal(podNames) +func (r *statusReconciler) buildReadyInitReplicas(its *workloads.InstanceSet, readyReplicas int32) *int32 { + if its.Status.InitReplicas == nil { + return nil + } + // init replicas cannot be zero + if *its.Status.InitReplicas == ptr.Deref(its.Status.ReadyInitReplicas, 0) { + return its.Status.ReadyInitReplicas + } + return ptr.To(readyReplicas) } -func buildTemplatesStatus(template2TemplatesStatus map[string]*workloads.InstanceTemplateStatus) []workloads.InstanceTemplateStatus { +func (r *statusReconciler) buildTemplatesStatus(template2TemplatesStatus map[string]*workloads.InstanceTemplateStatus) []workloads.InstanceTemplateStatus { var templatesStatus []workloads.InstanceTemplateStatus for templateName, templateStatus := range template2TemplatesStatus { if len(templateName) == 0 { @@ -226,7 +233,7 @@ func buildTemplatesStatus(template2TemplatesStatus map[string]*workloads.Instanc return templatesStatus } -func buildReadyCondition(its *workloads.InstanceSet, 
ready bool, notReadyNames sets.Set[string]) (*metav1.Condition, error) { +func (r *statusReconciler) buildReadyCondition(its *workloads.InstanceSet, ready bool, notReadyNames sets.Set[string]) (*metav1.Condition, error) { condition := &metav1.Condition{ Type: string(workloads.InstanceReady), Status: metav1.ConditionTrue, @@ -236,7 +243,7 @@ func buildReadyCondition(its *workloads.InstanceSet, ready bool, notReadyNames s if !ready { condition.Status = metav1.ConditionFalse condition.Reason = workloads.ReasonNotReady - message, err := buildConditionMessageWithNames(notReadyNames.UnsortedList()) + message, err := r.buildConditionMessageWithNames(notReadyNames.UnsortedList()) if err != nil { return nil, err } @@ -245,7 +252,7 @@ func buildReadyCondition(its *workloads.InstanceSet, ready bool, notReadyNames s return condition, nil } -func buildAvailableCondition(its *workloads.InstanceSet, available bool, notAvailableNames sets.Set[string]) (*metav1.Condition, error) { +func (r *statusReconciler) buildAvailableCondition(its *workloads.InstanceSet, available bool, notAvailableNames sets.Set[string]) (*metav1.Condition, error) { condition := &metav1.Condition{ Type: string(workloads.InstanceAvailable), Status: metav1.ConditionTrue, @@ -255,7 +262,7 @@ func buildAvailableCondition(its *workloads.InstanceSet, available bool, notAvai if !available { condition.Status = metav1.ConditionFalse condition.Reason = workloads.ReasonNotAvailable - message, err := buildConditionMessageWithNames(notAvailableNames.UnsortedList()) + message, err := r.buildConditionMessageWithNames(notAvailableNames.UnsortedList()) if err != nil { return nil, err } @@ -264,7 +271,7 @@ func buildAvailableCondition(its *workloads.InstanceSet, available bool, notAvai return condition, nil } -func buildFailureCondition(its *workloads.InstanceSet, pods []*corev1.Pod) (*metav1.Condition, error) { +func (r *statusReconciler) buildFailureCondition(its *workloads.InstanceSet, pods []*corev1.Pod) 
(*metav1.Condition, error) { var failureNames []string for _, pod := range pods { if isTerminating(pod) { @@ -284,7 +291,7 @@ func buildFailureCondition(its *workloads.InstanceSet, pods []*corev1.Pod) (*met if len(failureNames) == 0 { return nil, nil } - message, err := buildConditionMessageWithNames(failureNames) + message, err := r.buildConditionMessageWithNames(failureNames) if err != nil { return nil, err } @@ -297,94 +304,87 @@ func buildFailureCondition(its *workloads.InstanceSet, pods []*corev1.Pod) (*met }, nil } -func setInstanceStatus(tree *kubebuilderx.ObjectTree, its *workloads.InstanceSet, pods []*corev1.Pod) { - instanceStatus := make([]workloads.InstanceStatus, 0) - for _, pod := range pods { - status := workloads.InstanceStatus{ - PodName: pod.Name, - } - instanceStatus = append(instanceStatus, status) - } - - syncMemberStatus(its, instanceStatus, pods) - - syncInstanceConfigStatus(its, instanceStatus) - - if tree != nil { - syncInstancePVCStatus(tree, its, instanceStatus) - } +func (r *statusReconciler) buildConditionMessageWithNames(podNames []string) ([]byte, error) { + baseSort(podNames, func(i int) (string, int) { + return parseParentNameAndOrdinal(podNames[i]) + }, nil, true) + return json.Marshal(podNames) +} - sortInstanceStatus(instanceStatus) - its.Status.InstanceStatus = instanceStatus +func (r *statusReconciler) buildInstanceStatus(tree *kubebuilderx.ObjectTree, its *workloads.InstanceSet, pods []*corev1.Pod) { + r.buildInstanceRoleStatus(its, pods) + r.buildInstanceLifecycleStatus(its, pods) + r.buildInstancePVCStatus(tree, its) + r.sortInstanceStatus(its.Status.InstanceStatus) } -func sortInstanceStatus(instanceStatus []workloads.InstanceStatus) { +func (r *statusReconciler) sortInstanceStatus(instanceStatus []workloads.InstanceStatus) { getNameNOrdinalFunc := func(i int) (string, int) { return parseParentNameAndOrdinal(instanceStatus[i].PodName) } baseSort(instanceStatus, getNameNOrdinalFunc, nil, true) } -func syncMemberStatus(its 
*workloads.InstanceSet, instanceStatus []workloads.InstanceStatus, pods []*corev1.Pod) { - if its.Spec.Roles != nil { - roleMap := composeRoleMap(*its) - for _, pod := range pods { - if !intctrlutil.PodIsReadyWithLabel(*pod) { - continue +func (r *statusReconciler) buildInstanceRoleStatus(its *workloads.InstanceSet, pods []*corev1.Pod) { + if its.Spec.Roles == nil { + return + } + setRole := func(name, role string) { + for i, inst := range its.Status.InstanceStatus { + if inst.PodName == name { + its.Status.InstanceStatus[i].Role = role + break } + } + } + roleMap := composeRoleMap(*its) + for _, pod := range pods { + if !intctrlutil.PodIsReadyWithLabel(*pod) { + setRole(pod.Name, "") + } else { roleName := getRoleName(pod) role, ok := roleMap[roleName] - if !ok { - continue - } - for i, inst := range instanceStatus { - if inst.PodName == pod.Name { - instanceStatus[i].Role = role.Name - break - } + if ok { + setRole(pod.Name, role.Name) } + } } } -func syncInstanceConfigStatus(its *workloads.InstanceSet, instanceStatus []workloads.InstanceStatus) { - if its.Status.InstanceStatus == nil { - // initialize - configs := make([]workloads.InstanceConfigStatus, 0) - for _, config := range its.Spec.Configs { - configs = append(configs, workloads.InstanceConfigStatus{ - Name: config.Name, - Generation: config.Generation, - }) +func (r *statusReconciler) buildInstanceLifecycleStatus(its *workloads.InstanceSet, pods []*corev1.Pod) { + dataLoaded := func(inst workloads.InstanceStatus, pod *corev1.Pod) *bool { + if its.Spec.LifecycleActions == nil || its.Spec.LifecycleActions.DataLoad == nil { + return nil } - for i := range instanceStatus { - instanceStatus[i].Configs = configs + if inst.DataLoaded == nil || *inst.DataLoaded { + return inst.DataLoaded } - } else { - // HACK: copy the existing config status from the current its.status.instanceStatus - configs := sets.New[string]() - for _, config := range its.Spec.Configs { - configs.Insert(config.Name) + loaded, ok := 
pod.Annotations[constant.LifeCycleDataLoadedAnnotationKey] + if !ok { + return ptr.To(false) } - for i, newStatus := range instanceStatus { - for _, status := range its.Status.InstanceStatus { - if status.PodName == newStatus.PodName { - if instanceStatus[i].Configs == nil { - instanceStatus[i].Configs = make([]workloads.InstanceConfigStatus, 0) - } - for j, config := range status.Configs { - if configs.Has(config.Name) { - instanceStatus[i].Configs = append(instanceStatus[i].Configs, status.Configs[j]) - } - } - break - } - } + return ptr.To(strings.ToLower(loaded) == "true") + } + + pm := make(map[string]*corev1.Pod) + for i, pod := range pods { + pm[pod.Name] = pods[i] + } + for i, inst := range its.Status.InstanceStatus { + pod, ok := pm[inst.PodName] + if !ok { + continue } + its.Status.InstanceStatus[i].Provisioned = true + its.Status.InstanceStatus[i].DataLoaded = dataLoaded(inst, pod) } } -func syncInstancePVCStatus(tree *kubebuilderx.ObjectTree, its *workloads.InstanceSet, instanceStatus []workloads.InstanceStatus) { +func (r *statusReconciler) buildInstancePVCStatus(tree *kubebuilderx.ObjectTree, its *workloads.InstanceSet) { + if tree == nil { + return + } pvcs := tree.List(&corev1.PersistentVolumeClaim{}) var pvcList []*corev1.PersistentVolumeClaim for _, obj := range pvcs { @@ -405,10 +405,10 @@ func syncInstancePVCStatus(tree *kubebuilderx.ObjectTree, its *workloads.Instanc instName = pvc.Labels[constant.KBAppPodNameLabelKey] } if len(instName) > 0 { - for i, inst := range instanceStatus { + for i, inst := range its.Status.InstanceStatus { if inst.PodName == instName { // TODO: how to check the expansion failed? 
- instanceStatus[i].VolumeExpansion = true + its.Status.InstanceStatus[i].InVolumeExpansion = true break } } diff --git a/pkg/controller/instanceset/reconciler_status_test.go b/pkg/controller/instanceset/reconciler_status_test.go index 2ad15e0ed9f..4f5e8f26bc9 100644 --- a/pkg/controller/instanceset/reconciler_status_test.go +++ b/pkg/controller/instanceset/reconciler_status_test.go @@ -72,7 +72,7 @@ var _ = Describe("status reconciler test", func() { Expect(res).Should(Equal(kubebuilderx.Continue)) By("replicas alignment") - reconciler = NewReplicasAlignmentReconciler() + reconciler = NewInstanceAlignmentReconciler() res, err = reconciler.Reconcile(tree) Expect(err).Should(BeNil()) Expect(res).Should(Equal(kubebuilderx.Continue)) @@ -399,7 +399,8 @@ var _ = Describe("status reconciler test", func() { replicas := int32(3) its.Spec.Replicas = &replicas its.Status.InstanceStatus = oldInstanceStatus - setInstanceStatus(nil, its, pods) + r := &statusReconciler{} + r.buildInstanceStatus(nil, its, pods) Expect(its.Status.InstanceStatus).Should(HaveLen(3)) Expect(its.Status.InstanceStatus[0].PodName).Should(Equal("pod-0")) diff --git a/pkg/controller/instanceset/reconciler_update.go b/pkg/controller/instanceset/reconciler_update.go index c7800a78911..bf92ab33660 100644 --- a/pkg/controller/instanceset/reconciler_update.go +++ b/pkg/controller/instanceset/reconciler_update.go @@ -212,6 +212,8 @@ func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder updatingPods++ } } + // TODO: compose the status from pods but not the its spec and status + r.updateInstanceConfigStatus(its, pod) } if !isBlocked { @@ -243,7 +245,7 @@ func (r *updateReconciler) memberUpdateQuota(its *workloads.InstanceSet, podList // if it's a roleful InstanceSet, we use updateCount to represent Pods can be updated according to the spec.memberUpdateStrategy. 
updateCount := len(podList) if len(its.Spec.Roles) > 0 { - plan := NewUpdatePlan(*its, podList, r.isPodOrConfigUpdated) + plan := NewUpdatePlan(*its, podList, r.isInstanceUpdated) podsToBeUpdated, err := plan.Execute() if err != nil { return -1, err @@ -302,8 +304,6 @@ func (r *updateReconciler) reconfigure(tree *kubebuilderx.ObjectTree, its *workl return false, err } } - // TODO: compose the status from pods but not the its spec and status - r.setInstanceConfigStatus(its, pod, config) } return allUpdated, nil } @@ -342,35 +342,22 @@ func (r *updateReconciler) reconfigureConfig(tree *kubebuilderx.ObjectTree, its return nil } -func (r *updateReconciler) setInstanceConfigStatus(its *workloads.InstanceSet, pod *corev1.Pod, config workloads.ConfigTemplate) { - if its.Status.InstanceStatus == nil { - its.Status.InstanceStatus = make([]workloads.InstanceStatus, 0) - } - idx := slices.IndexFunc(its.Status.InstanceStatus, func(instance workloads.InstanceStatus) bool { +func (r *updateReconciler) updateInstanceConfigStatus(its *workloads.InstanceSet, pod *corev1.Pod) { + idx1 := slices.IndexFunc(its.Status.InstanceStatus, func(instance workloads.InstanceStatus) bool { return instance.PodName == pod.Name }) - if idx < 0 { - its.Status.InstanceStatus = append(its.Status.InstanceStatus, workloads.InstanceStatus{PodName: pod.Name}) - idx = len(its.Status.InstanceStatus) - 1 + if idx1 < 0 { + return // instance status for pod not found? 
} - if its.Status.InstanceStatus[idx].Configs == nil { - its.Status.InstanceStatus[idx].Configs = make([]workloads.InstanceConfigStatus, 0) - } - status := workloads.InstanceConfigStatus{ - Name: config.Name, - Generation: config.Generation, - } - for i, configStatus := range its.Status.InstanceStatus[idx].Configs { - if configStatus.Name == config.Name { - its.Status.InstanceStatus[idx].Configs[i] = status - return - } + var configs []workloads.InstanceConfigStatus + for _, config := range its.Spec.Configs { + configs = append(configs, workloads.InstanceConfigStatus{Name: config.Name, Generation: config.Generation}) } - its.Status.InstanceStatus[idx].Configs = append(its.Status.InstanceStatus[idx].Configs, status) + its.Status.InstanceStatus[idx1].Configs = configs } -func (r *updateReconciler) isPodOrConfigUpdated(its *workloads.InstanceSet, pod *corev1.Pod) (bool, error) { +func (r *updateReconciler) isInstanceUpdated(its *workloads.InstanceSet, pod *corev1.Pod) (bool, error) { policy, _, err := getPodUpdatePolicy(its, pod) if err != nil { return false, err @@ -393,9 +380,9 @@ func (r *updateReconciler) isConfigUpdated(its *workloads.InstanceSet, pod *core if idx < 0 { return true // new pod provisioned } - for _, configStatus := range its.Status.InstanceStatus[idx].Configs { - if configStatus.Name == config.Name { - return config.Generation <= configStatus.Generation + for _, status := range its.Status.InstanceStatus[idx].Configs { + if status.Name == config.Name { + return config.Generation <= status.Generation } } return config.Generation <= 0 diff --git a/pkg/controller/instanceset/reconciler_update_test.go b/pkg/controller/instanceset/reconciler_update_test.go index a720d8d48c4..5b876398aaf 100644 --- a/pkg/controller/instanceset/reconciler_update_test.go +++ b/pkg/controller/instanceset/reconciler_update_test.go @@ -78,7 +78,7 @@ var _ = Describe("update reconciler test", func() { Expect(res).Should(Equal(kubebuilderx.Continue)) By("replicas alignment") - 
reconciler = NewReplicasAlignmentReconciler() + reconciler = NewInstanceAlignmentReconciler() res, err = reconciler.Reconcile(tree) Expect(err).Should(BeNil()) Expect(res).Should(Equal(kubebuilderx.Continue)) diff --git a/pkg/controller/instanceset/tree_loader.go b/pkg/controller/instanceset/tree_loader.go index 57cc302db26..73a79c0f97a 100644 --- a/pkg/controller/instanceset/tree_loader.go +++ b/pkg/controller/instanceset/tree_loader.go @@ -51,6 +51,7 @@ func (r *treeLoader) Load(ctx context.Context, reader client.Reader, req ctrl.Re } tree.Context = ctx + tree.Reader = reader tree.EventRecorder = recorder tree.Logger = logger tree.SetFinalizer(finalizer) diff --git a/pkg/controller/instanceset/utils.go b/pkg/controller/instanceset/utils.go index 58297d9983b..1d4e503e891 100644 --- a/pkg/controller/instanceset/utils.go +++ b/pkg/controller/instanceset/utils.go @@ -36,6 +36,8 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" + intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" + "github.com/apecloud/kubeblocks/pkg/kbagent" ) const defaultPriority = 0 @@ -209,13 +211,45 @@ func newLifecycleAction(its *workloads.InstanceSet, objects []client.Object, pod compName = its.Labels[constant.KBAppComponentLabelKey] lifecycleActions = &kbappsv1.ComponentLifecycleActions{ Switchover: its.Spec.LifecycleActions.Switchover, + MemberJoin: its.Spec.LifecycleActions.MemberJoin, + MemberLeave: its.Spec.LifecycleActions.MemberLeave, Reconfigure: its.Spec.LifecycleActions.Reconfigure, } - pods []*corev1.Pod + replica = &lifecycleReplica{ + Pod: *pod, + } + replicas []lifecycle.Replica ) for i := range objects { - pods = append(pods, objects[i].(*corev1.Pod)) + replicas = append(replicas, &lifecycleReplica{ + Pod: *(objects[i].(*corev1.Pod)), + }) } return lifecycle.New(its.Namespace, clusterName, compName, - lifecycleActions, 
its.Spec.LifecycleActions.TemplateVars, pod, pods...) + lifecycleActions, its.Spec.LifecycleActions.TemplateVars, replica, replicas...) +} + +type lifecycleReplica struct { + corev1.Pod +} + +func (r *lifecycleReplica) Namespace() string { + return r.ObjectMeta.Namespace +} + +func (r *lifecycleReplica) Name() string { + return r.ObjectMeta.Name +} + +func (r *lifecycleReplica) Role() string { + return r.ObjectMeta.Labels[constant.RoleLabelKey] +} + +func (r *lifecycleReplica) Endpoint() (string, int32, error) { + port, err := intctrlutil.GetPortByName(r.Pod, kbagent.ContainerName, kbagent.DefaultHTTPPortName) + return r.Status.PodIP, port, err +} + +func (r *lifecycleReplica) StreamingEndpoint() (string, int32, error) { + return "", 0, fmt.Errorf("NotSupported") } diff --git a/pkg/controller/instanceset/utils_test.go b/pkg/controller/instanceset/utils_test.go index 1da9f744ce9..5444cb53209 100644 --- a/pkg/controller/instanceset/utils_test.go +++ b/pkg/controller/instanceset/utils_test.go @@ -27,6 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" "github.com/apecloud/kubeblocks/pkg/controller/builder" @@ -174,12 +175,12 @@ var _ = Describe("utils test", func() { SetReplicas(replicas). 
GetObject() its.Status = workloads.InstanceSetStatus{ - InitReplicas: replicas, + InitReplicas: ptr.To(replicas), } Expect(its.IsInstanceSetReady()).Should(BeFalse()) By("set its.status.observedGeneration to not equal generation") - its.Status.ReadyInitReplicas = replicas + its.Status.ReadyInitReplicas = ptr.To(replicas) its.Generation = 1 Expect(its.IsInstanceSetReady()).Should(BeFalse()) diff --git a/pkg/controller/instanceset2/assistant_object_utils.go b/pkg/controller/instanceset2/assistant_object_utils.go index 1532efcff9c..71c6395aa31 100644 --- a/pkg/controller/instanceset2/assistant_object_utils.go +++ b/pkg/controller/instanceset2/assistant_object_utils.go @@ -99,6 +99,8 @@ func objectReferenceToObject(objRef corev1.ObjectReference) (client.Object, erro Name: objRef.Name, } switch objRef.Kind { + case objectKind(&corev1.Service{}): + return &corev1.Service{ObjectMeta: meta}, nil case objectKind(&corev1.ConfigMap{}): return &corev1.ConfigMap{ObjectMeta: meta}, nil case objectKind(&corev1.Secret{}): @@ -129,10 +131,13 @@ func instanceAssistantObject(obj client.Object) workloads.InstanceAssistantObjec } } if service, ok := obj.(*corev1.Service); ok { + spec := service.Spec.DeepCopy() + spec.ClusterIP = "" + spec.ClusterIPs = nil return workloads.InstanceAssistantObject{ Service: &corev1.Service{ ObjectMeta: objectMeta(), - Spec: service.Spec, + Spec: *spec, }, } } diff --git a/pkg/controller/instanceset2/instance_util.go b/pkg/controller/instanceset2/instance_util.go index a0dfabef13f..0f7b7aa63bb 100644 --- a/pkg/controller/instanceset2/instance_util.go +++ b/pkg/controller/instanceset2/instance_util.go @@ -138,7 +138,7 @@ func buildInstanceByTemplate(tree *kubebuilderx.ObjectTree, SetPodUpdatePolicy(its.Spec.PodUpdatePolicy). SetPodUpgradePolicy(its.Spec.PodUpgradePolicy). SetRoles(its.Spec.Roles). 
- SetLifecycleActions(its.Spec.LifecycleActions) + SetLifecycleActions(its.Spec.LifecycleActions, its.IsInInitializing()) // set these immutable fields only on initial Pod creation, not updates. b.SetHostname(instName). diff --git a/pkg/controller/instanceset2/reconciler_assistant_object.go b/pkg/controller/instanceset2/reconciler_headless_service.go similarity index 55% rename from pkg/controller/instanceset2/reconciler_assistant_object.go rename to pkg/controller/instanceset2/reconciler_headless_service.go index 86ebf30d0c8..afbf4ec6b88 100644 --- a/pkg/controller/instanceset2/reconciler_assistant_object.go +++ b/pkg/controller/instanceset2/reconciler_headless_service.go @@ -21,6 +21,7 @@ package instanceset2 import ( "fmt" + "slices" "strings" corev1 "k8s.io/api/core/v1" @@ -35,15 +36,15 @@ import ( intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) -func NewAssistantObjectReconciler() kubebuilderx.Reconciler { - return &assistantObjectReconciler{} +func NewHeadlessServiceReconciler() kubebuilderx.Reconciler { + return &headlessServiceReconciler{} } -type assistantObjectReconciler struct{} +type headlessServiceReconciler struct{} -var _ kubebuilderx.Reconciler = &assistantObjectReconciler{} +var _ kubebuilderx.Reconciler = &headlessServiceReconciler{} -func (a *assistantObjectReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult { +func (r *headlessServiceReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult { if tree.GetRoot() == nil || model.IsObjectDeleting(tree.GetRoot()) { return kubebuilderx.ConditionUnsatisfied } @@ -53,72 +54,81 @@ func (a *assistantObjectReconciler) PreCondition(tree *kubebuilderx.ObjectTree) return kubebuilderx.ConditionSatisfied } -func (a *assistantObjectReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilderx.Result, error) { - var ( - objects []client.Object - its, _ = tree.GetRoot().(*workloads.InstanceSet) - ) - - if 
!its.Spec.DisableDefaultHeadlessService && !shouldCloneInstanceAssistantObjects(its) { +func (r *headlessServiceReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilderx.Result, error) { + its, _ := tree.GetRoot().(*workloads.InstanceSet) + var headlessService *corev1.Service + if !its.Spec.DisableDefaultHeadlessService { labels := getMatchLabels(its.Name) headlessSelectors := getHeadlessSvcSelector(its) - headLessSvc := buildHeadlessSvc(*its, labels, headlessSelectors) - objects = append(objects, headLessSvc) + headlessService = buildHeadlessSvc(*its, labels, headlessSelectors) } - for _, object := range objects { - if err := intctrlutil.SetOwnership(its, object, model.GetScheme(), finalizer); err != nil { + if headlessService != nil { + if err := intctrlutil.SetOwnership(its, headlessService, model.GetScheme(), finalizer); err != nil { return kubebuilderx.Continue, err } } - // compute create/update/delete set - newSnapshot := make(map[model.GVKNObjKey]client.Object) - for _, object := range objects { - name, err := model.GetGVKName(object) - if err != nil { - return kubebuilderx.Continue, err - } - newSnapshot[*name] = object + oldHeadlessService, err := tree.Get(buildHeadlessSvc(*its, nil, nil)) + if err != nil { + return kubebuilderx.Continue, err } - oldSnapshot := make(map[model.GVKNObjKey]client.Object) - svcList := tree.List(&corev1.Service{}) - for _, objectList := range [][]client.Object{svcList} { - for _, object := range objectList { - name, err := model.GetGVKName(object) - if err != nil { - return kubebuilderx.Continue, err - } - oldSnapshot[*name] = object - } - } - - // now compute the diff between old and target snapshot and generate the plan - oldNameSet := sets.KeySet(oldSnapshot) - newNameSet := sets.KeySet(newSnapshot) - createSet := newNameSet.Difference(oldNameSet) - updateSet := newNameSet.Intersection(oldNameSet) - deleteSet := oldNameSet.Difference(newNameSet) - for name := range createSet { - if err := 
tree.Add(newSnapshot[name]); err != nil { + skipToReconcileOpt := kubebuilderx.SkipToReconcile(shouldCloneInstanceAssistantObjects(its)) + if oldHeadlessService == nil && headlessService != nil { + if err := tree.AddWithOption(headlessService, skipToReconcileOpt); err != nil { return kubebuilderx.Continue, err } } - for name := range updateSet { - oldObj := oldSnapshot[name] - newObj := copyAndMerge(oldObj, newSnapshot[name]) - if err := tree.Update(newObj); err != nil { + if oldHeadlessService != nil && headlessService != nil { + newObj := copyAndMerge(oldHeadlessService, headlessService) + if err := tree.Update(newObj, skipToReconcileOpt); err != nil { return kubebuilderx.Continue, err } } - for name := range deleteSet { - if err := tree.Delete(oldSnapshot[name]); err != nil { + if oldHeadlessService != nil && headlessService == nil { + if err := tree.DeleteWithOption(oldHeadlessService, skipToReconcileOpt); err != nil { return kubebuilderx.Continue, err } } + + if headlessService != nil { + r.addHeadlessService(its, headlessService) + } else { + r.deleteHeadlessService(its, oldHeadlessService) + } + return kubebuilderx.Continue, nil } +func (r *headlessServiceReconciler) addHeadlessService(its *workloads.InstanceSet, svc *corev1.Service) { + if shouldCloneInstanceAssistantObjects(its) && svc != nil { + if its.Spec.InstanceAssistantObjects == nil { + its.Spec.InstanceAssistantObjects = make([]corev1.ObjectReference, 0) + } + gvk, _ := model.GetGVKName(svc) + its.Spec.InstanceAssistantObjects = append(its.Spec.InstanceAssistantObjects, + corev1.ObjectReference{ + Kind: gvk.Kind, + Namespace: gvk.Namespace, + Name: gvk.Name, + }) + } +} + +func (r *headlessServiceReconciler) deleteHeadlessService(its *workloads.InstanceSet, obj client.Object) { + var svc *corev1.Service + if obj != nil { + svc = obj.(*corev1.Service) + } + if svc != nil { + gvk, _ := model.GetGVKName(svc) + its.Spec.InstanceAssistantObjects = slices.DeleteFunc(its.Spec.InstanceAssistantObjects, + 
func(o corev1.ObjectReference) bool { + return o.Kind == gvk.Kind && o.Namespace == gvk.Namespace && o.Name == gvk.Name + }) + } +} + func getHeadlessSvcSelector(its *workloads.InstanceSet) map[string]string { selectors := make(map[string]string) for k, v := range its.Spec.Selector.MatchLabels { diff --git a/pkg/controller/instanceset2/reconciler_revision_update.go b/pkg/controller/instanceset2/reconciler_revision_update.go index a9e8e7ecc99..82307a7c5a2 100644 --- a/pkg/controller/instanceset2/reconciler_revision_update.go +++ b/pkg/controller/instanceset2/reconciler_revision_update.go @@ -20,6 +20,7 @@ along with this program. If not, see . package instanceset2 import ( + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" @@ -45,8 +46,8 @@ func (r *revisionUpdateReconciler) PreCondition(tree *kubebuilderx.ObjectTree) * func (r *revisionUpdateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilderx.Result, error) { its, _ := tree.GetRoot().(*workloads.InstanceSet) - updatedReplicas := r.calculateUpdatedReplicas(its, tree.List(&workloads.Instance{})) - its.Status.UpdatedReplicas = updatedReplicas + its.Status.UpdatedReplicas = r.calculateUpdatedReplicas(its, tree.List(&workloads.Instance{})) + its.Status.InitReplicas = r.buildInitReplicas(its) its.Status.ObservedGeneration = its.Generation @@ -63,3 +64,23 @@ func (r *revisionUpdateReconciler) calculateUpdatedReplicas(its *workloads.Insta } return updatedReplicas } + +func (r *revisionUpdateReconciler) buildInitReplicas(its *workloads.InstanceSet) *int32 { + initReplicas := its.Status.InitReplicas + if initReplicas == nil && ptr.Deref(its.Spec.Replicas, 0) > 0 { + initReplicas = its.Spec.Replicas + } + if initReplicas == nil { + return nil // the replicas is not set or set to 0 + } + + if *initReplicas != ptr.Deref(its.Status.ReadyInitReplicas, 0) { // in init phase + // in case the replicas is changed in the middle of init phase + if 
ptr.Deref(its.Spec.Replicas, 0) == 0 { + return nil + } else { + return its.Spec.Replicas + } + } + return initReplicas +} diff --git a/pkg/controller/instanceset2/reconciler_status.go b/pkg/controller/instanceset2/reconciler_status.go index 8ed4d06dab6..d6e59a54bec 100644 --- a/pkg/controller/instanceset2/reconciler_status.go +++ b/pkg/controller/instanceset2/reconciler_status.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" "github.com/apecloud/kubeblocks/pkg/controller/instancetemplate" @@ -79,11 +80,6 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder template2TotalReplicas[template.Name] = templateReplicas } - // podToNodeMapping, err := ParseNodeSelectorOnceAnnotation(its) - // if err != nil { - // return kubebuilderx.Continue, err - // } - for _, inst := range instanceList { _, ordinal := parseParentNameAndOrdinal(inst.Name) templateName := inst.Labels[instancetemplate.TemplateNameLabelKey] @@ -122,17 +118,8 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder template2TemplatesStatus[templateName].CurrentReplicas++ } } - - // TODO: ??? 
- // if nodeName, ok := podToNodeMapping[inst.Name]; ok { - // // there's chance that a pod is currently running and wait to be deleted so that it can be rescheduled - // if inst.Spec.NodeName == nodeName { - // if err := deleteNodeSelectorOnceAnnotation(its, inst.Name); err != nil { - // return kubebuilderx.Continue, err - // } - // } - // } } + its.Status.ReadyInitReplicas = r.buildReadyInitReplicas(its, readyReplicas) its.Status.Replicas = replicas its.Status.Ordinals = ordinals slices.Sort(its.Status.Ordinals) @@ -141,7 +128,7 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder its.Status.CurrentReplicas = currentReplicas its.Status.UpdatedReplicas = updatedReplicas // its.Status.CurrentRevisions, _ = buildRevisions(currentRevisions) - its.Status.TemplatesStatus = buildTemplatesStatus(template2TemplatesStatus) + its.Status.TemplatesStatus = r.buildTemplatesStatus(template2TemplatesStatus) // all pods have been updated totalReplicas := int32(1) if its.Spec.Replicas != nil { @@ -158,20 +145,20 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder } } - readyCondition, err := buildReadyCondition(its, readyReplicas >= replicas, notReadyNames) + readyCondition, err := r.buildReadyCondition(its, readyReplicas >= replicas, notReadyNames) if err != nil { return kubebuilderx.Continue, err } meta.SetStatusCondition(&its.Status.Conditions, *readyCondition) - availableCondition, err := buildAvailableCondition(its, availableReplicas >= replicas, notAvailableNames) + availableCondition, err := r.buildAvailableCondition(its, availableReplicas >= replicas, notAvailableNames) if err != nil { return kubebuilderx.Continue, err } meta.SetStatusCondition(&its.Status.Conditions, *availableCondition) // 3. 
set InstanceFailure condition - failureCondition, err := buildFailureCondition(its, instanceList) + failureCondition, err := r.buildFailureCondition(its, instanceList) if err != nil { return kubebuilderx.Continue, err } @@ -181,8 +168,8 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder meta.RemoveStatusCondition(&its.Status.Conditions, string(workloads.InstanceFailure)) } - // 4. set instance status - setInstanceStatus(its, instanceList) + // 4. build instance status + r.buildInstanceStatus(its, instanceList) if its.Spec.MinReadySeconds > 0 && availableReplicas != readyReplicas { return kubebuilderx.RetryAfter(time.Second), nil @@ -190,14 +177,18 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder return kubebuilderx.Continue, nil } -func buildConditionMessageWithNames(instanceNames []string) ([]byte, error) { - baseSort(instanceNames, func(i int) (string, int) { - return parseParentNameAndOrdinal(instanceNames[i]) - }, nil, true) - return json.Marshal(instanceNames) +func (r *statusReconciler) buildReadyInitReplicas(its *workloads.InstanceSet, readyReplicas int32) *int32 { + if its.Status.InitReplicas == nil { + return nil + } + // init replicas cannot be zero + if *its.Status.InitReplicas == ptr.Deref(its.Status.ReadyInitReplicas, 0) { + return its.Status.ReadyInitReplicas + } + return ptr.To(readyReplicas) } -func buildTemplatesStatus(template2TemplatesStatus map[string]*workloads.InstanceTemplateStatus) []workloads.InstanceTemplateStatus { +func (r *statusReconciler) buildTemplatesStatus(template2TemplatesStatus map[string]*workloads.InstanceTemplateStatus) []workloads.InstanceTemplateStatus { var templatesStatus []workloads.InstanceTemplateStatus for templateName, templateStatus := range template2TemplatesStatus { if len(templateName) == 0 { @@ -212,7 +203,7 @@ func buildTemplatesStatus(template2TemplatesStatus map[string]*workloads.Instanc return templatesStatus } -func buildReadyCondition(its 
*workloads.InstanceSet, ready bool, notReadyNames sets.Set[string]) (*metav1.Condition, error) { +func (r *statusReconciler) buildReadyCondition(its *workloads.InstanceSet, ready bool, notReadyNames sets.Set[string]) (*metav1.Condition, error) { condition := &metav1.Condition{ Type: string(workloads.InstanceReady), Status: metav1.ConditionTrue, @@ -222,7 +213,7 @@ func buildReadyCondition(its *workloads.InstanceSet, ready bool, notReadyNames s if !ready { condition.Status = metav1.ConditionFalse condition.Reason = workloads.ReasonNotReady - message, err := buildConditionMessageWithNames(notReadyNames.UnsortedList()) + message, err := r.buildConditionMessageWithNames(notReadyNames.UnsortedList()) if err != nil { return nil, err } @@ -231,7 +222,7 @@ func buildReadyCondition(its *workloads.InstanceSet, ready bool, notReadyNames s return condition, nil } -func buildAvailableCondition(its *workloads.InstanceSet, available bool, notAvailableNames sets.Set[string]) (*metav1.Condition, error) { +func (r *statusReconciler) buildAvailableCondition(its *workloads.InstanceSet, available bool, notAvailableNames sets.Set[string]) (*metav1.Condition, error) { condition := &metav1.Condition{ Type: string(workloads.InstanceAvailable), Status: metav1.ConditionTrue, @@ -241,7 +232,7 @@ func buildAvailableCondition(its *workloads.InstanceSet, available bool, notAvai if !available { condition.Status = metav1.ConditionFalse condition.Reason = workloads.ReasonNotAvailable - message, err := buildConditionMessageWithNames(notAvailableNames.UnsortedList()) + message, err := r.buildConditionMessageWithNames(notAvailableNames.UnsortedList()) if err != nil { return nil, err } @@ -250,7 +241,7 @@ func buildAvailableCondition(its *workloads.InstanceSet, available bool, notAvai return condition, nil } -func buildFailureCondition(its *workloads.InstanceSet, instances []*workloads.Instance) (*metav1.Condition, error) { +func (r *statusReconciler) buildFailureCondition(its *workloads.InstanceSet, 
instances []*workloads.Instance) (*metav1.Condition, error) { var failureNames []string for _, inst := range instances { if intctrlutil.IsInstanceFailure(inst) { @@ -260,7 +251,7 @@ func buildFailureCondition(its *workloads.InstanceSet, instances []*workloads.In if len(failureNames) == 0 { return nil, nil } - message, err := buildConditionMessageWithNames(failureNames) + message, err := r.buildConditionMessageWithNames(failureNames) if err != nil { return nil, err } @@ -273,34 +264,38 @@ func buildFailureCondition(its *workloads.InstanceSet, instances []*workloads.In }, nil } -func setInstanceStatus(its *workloads.InstanceSet, instances []*workloads.Instance) { - // compose new instance status +func (r *statusReconciler) buildConditionMessageWithNames(instanceNames []string) ([]byte, error) { + baseSort(instanceNames, func(i int) (string, int) { + return parseParentNameAndOrdinal(instanceNames[i]) + }, nil, true) + return json.Marshal(instanceNames) +} + +func (r *statusReconciler) buildInstanceStatus(its *workloads.InstanceSet, instances []*workloads.Instance) { instanceStatus := make([]workloads.InstanceStatus, 0) for _, inst := range instances { - status := workloads.InstanceStatus{ + instanceStatus = append(instanceStatus, workloads.InstanceStatus{ PodName: inst.Name, - } - instanceStatus = append(instanceStatus, status) + }) } - syncMemberStatus(its, instanceStatus, instances) + r.syncInstanceRoleStatus(its, instanceStatus, instances) + r.syncInstanceConfigStatus(its, instanceStatus) + r.syncInstanceLifecycleStatus(its, instanceStatus, instances) + r.syncInstancePVCStatus(its, instanceStatus, instances) - syncInstanceConfigStatus(its, instanceStatus) - - syncInstancePVCStatus(its, instanceStatus, instances) - - sortInstanceStatus(instanceStatus) + r.sortInstanceStatus(instanceStatus) its.Status.InstanceStatus = instanceStatus } -func sortInstanceStatus(instanceStatus []workloads.InstanceStatus) { +func (r *statusReconciler) sortInstanceStatus(instanceStatus 
[]workloads.InstanceStatus) { getNameNOrdinalFunc := func(i int) (string, int) { return parseParentNameAndOrdinal(instanceStatus[i].PodName) } baseSort(instanceStatus, getNameNOrdinalFunc, nil, true) } -func syncMemberStatus(its *workloads.InstanceSet, instanceStatus []workloads.InstanceStatus, instances []*workloads.Instance) { +func (r *statusReconciler) syncInstanceRoleStatus(its *workloads.InstanceSet, instanceStatus []workloads.InstanceStatus, instances []*workloads.Instance) { if its.Spec.Roles != nil { roleMap := composeRoleMap(*its) for _, inst := range instances { @@ -322,7 +317,7 @@ func syncMemberStatus(its *workloads.InstanceSet, instanceStatus []workloads.Ins } } -func syncInstanceConfigStatus(its *workloads.InstanceSet, instanceStatus []workloads.InstanceStatus) { +func (r *statusReconciler) syncInstanceConfigStatus(its *workloads.InstanceSet, instanceStatus []workloads.InstanceStatus) { if its.Status.InstanceStatus == nil { // initialize configs := make([]workloads.InstanceConfigStatus, 0) @@ -359,11 +354,27 @@ func syncInstanceConfigStatus(its *workloads.InstanceSet, instanceStatus []workl } } -func syncInstancePVCStatus(_ *workloads.InstanceSet, instanceStatus []workloads.InstanceStatus, instances []*workloads.Instance) { +func (r *statusReconciler) syncInstanceLifecycleStatus(_ *workloads.InstanceSet, instanceStatus []workloads.InstanceStatus, instances []*workloads.Instance) { + pm := make(map[string]*workloads.Instance) + for i, inst := range instances { + pm[inst.Name] = instances[i] + } + for i, status := range instanceStatus { + inst, ok := pm[status.PodName] + if !ok { + continue + } + instanceStatus[i].Provisioned = inst.Status.Provisioned + instanceStatus[i].DataLoaded = inst.Status.DataLoaded + instanceStatus[i].MemberJoined = inst.Status.MemberJoined + } +} + +func (r *statusReconciler) syncInstancePVCStatus(_ *workloads.InstanceSet, instanceStatus []workloads.InstanceStatus, instances []*workloads.Instance) { for _, inst := range 
instances { for i, status := range instanceStatus { if status.PodName == inst.Name { - instanceStatus[i].VolumeExpansion = inst.Status.VolumeExpansion + instanceStatus[i].InVolumeExpansion = inst.Status.InVolumeExpansion break } } diff --git a/pkg/controller/kubebuilderx/plan_builder.go b/pkg/controller/kubebuilderx/plan_builder.go index de6ba47799e..4163c027c22 100644 --- a/pkg/controller/kubebuilderx/plan_builder.go +++ b/pkg/controller/kubebuilderx/plan_builder.go @@ -165,18 +165,32 @@ func buildOrderedVertices(transCtx *transformContext, currentTree *ObjectTree, d workloadVertices = append(workloadVertices, vertex) } } + graphOptions := func(options ObjectOptions) []model.GraphOption { + opts := []model.GraphOption{ + inDataContext4G(), + } + for i := range options.PrevHooks { + opts = append(opts, model.WithPrevHook(options.PrevHooks[i])) + } + for i := range options.PostHooks { + opts = append(opts, model.WithPostHook(options.PostHooks[i])) + } + return opts + } createNewObjects := func() { for name := range createSet { - if desiredTree.childrenOptions[name].SkipToReconcile { + options := desiredTree.childrenOptions[name] + if options.SkipToReconcile { continue } - v := model.NewObjectVertex(nil, assign(ctx, newSnapshot[name]), model.ActionCreatePtr(), inDataContext4G()) + v := model.NewObjectVertex(nil, assign(ctx, newSnapshot[name]), model.ActionCreatePtr(), graphOptions(options)...) 
findAndAppend(v) } } updateObjects := func() { for name := range updateSet { - if desiredTree.childrenOptions[name].SkipToReconcile { + options := desiredTree.childrenOptions[name] + if options.SkipToReconcile { continue } oldObj := oldSnapshot[name] @@ -190,21 +204,23 @@ func buildOrderedVertices(transCtx *transformContext, currentTree *ObjectTree, d var v *model.ObjectVertex subResource := desiredTree.childrenOptions[*name].SubResource if subResource != "" { - v = model.NewObjectVertex(oldObj, newObj, model.ActionUpdatePtr(), inDataContext4G(), model.WithSubResource(subResource)) + opts := append(graphOptions(options), model.WithSubResource(subResource)) + v = model.NewObjectVertex(oldObj, newObj, model.ActionUpdatePtr(), opts...) } else { - v = model.NewObjectVertex(oldObj, newObj, model.ActionUpdatePtr(), inDataContext4G()) + v = model.NewObjectVertex(oldObj, newObj, model.ActionUpdatePtr(), graphOptions(options)...) } findAndAppend(v) } } } - deleteOrphanObjects := func() { + deleteObjects := func() { for name := range deleteSet { - if desiredTree.childrenOptions[name].SkipToReconcile { + options := desiredTree.childrenOptions[name] + if options.SkipToReconcile { continue } object := oldSnapshot[name] - v := model.NewObjectVertex(nil, object, model.ActionDeletePtr(), inDataContext4G()) + v := model.NewObjectVertex(nil, object, model.ActionDeletePtr(), graphOptions(options)...) 
findAndAppend(v) } } @@ -218,7 +234,7 @@ func buildOrderedVertices(transCtx *transformContext, currentTree *ObjectTree, d // objects to be updated updateObjects() // objects to be deleted - deleteOrphanObjects() + deleteObjects() // handle object dependencies handleDependencies() return vertices @@ -246,18 +262,49 @@ func (b *PlanBuilder) defaultWalkFunc(v graph.Vertex) error { if vertex.Action == nil { return errors.New("vertex action can't be nil") } + if err := b.prevCallHooks(vertex); err != nil { + return fmt.Errorf("vertex call prev hooks failed: %v, obj: %s, action: %v", err, vertex.Obj.GetName(), *vertex.Action) + } + var err error ctx := b.transCtx.ctx switch *vertex.Action { case model.CREATE: - return b.createObject(ctx, vertex) + err = b.createObject(ctx, vertex) case model.UPDATE: - return b.updateObject(ctx, vertex) + err = b.updateObject(ctx, vertex) case model.PATCH: - return b.patchObject(ctx, vertex) + err = b.patchObject(ctx, vertex) case model.DELETE: - return b.deleteObject(ctx, vertex) + err = b.deleteObject(ctx, vertex) case model.STATUS: - return b.statusObject(ctx, vertex) + err = b.statusObject(ctx, vertex) + } + if err == nil { + if err = b.postCallHooks(vertex); err != nil { + return fmt.Errorf("vertex call post hooks failed: %v, obj: %s, action: %v", err, vertex.Obj.GetName(), *vertex.Action) + } + } + return err +} + +func (b *PlanBuilder) prevCallHooks(vertex *model.ObjectVertex) error { + for i := range vertex.PrevHooks { + if vertex.PrevHooks[i] != nil { + if err := vertex.PrevHooks[i](vertex.Obj); err != nil { + return err + } + } + } + return nil +} + +func (b *PlanBuilder) postCallHooks(vertex *model.ObjectVertex) error { + for i := range vertex.PostHooks { + if vertex.PostHooks[i] != nil { + if err := vertex.PostHooks[i](vertex.Obj); err != nil { + return err + } + } } return nil } diff --git a/pkg/controller/kubebuilderx/reconciler.go b/pkg/controller/kubebuilderx/reconciler.go index 02fe83e2343..a090eb9fb16 100644 --- 
a/pkg/controller/kubebuilderx/reconciler.go +++ b/pkg/controller/kubebuilderx/reconciler.go @@ -43,6 +43,10 @@ type ObjectOptions struct { // if true, the object should not be reconciled SkipToReconcile bool + + // hooks are called before or after the object is manipulated + PrevHooks []func(client.Object) error + PostHooks []func(client.Object) error } type WithSubResource string @@ -57,9 +61,22 @@ func (o SkipToReconcile) ApplyToObject(opts *ObjectOptions) { opts.SkipToReconcile = bool(o) } +type WithPrevHook func(client.Object) error + +func (o WithPrevHook) ApplyToObject(opts *ObjectOptions) { + opts.PrevHooks = append(opts.PrevHooks, o) +} + +type WithPostHook func(client.Object) error + +func (o WithPostHook) ApplyToObject(opts *ObjectOptions) { + opts.PostHooks = append(opts.PostHooks, o) +} + type ObjectTree struct { // TODO(free6om): should find a better place to hold these two params? context.Context + client.Reader record.EventRecorder logr.Logger @@ -252,8 +269,20 @@ func (t *ObjectTree) Delete(objects ...client.Object) error { return nil } -func (t *ObjectTree) DeleteSecondaryObjects() { - t.children = make(model.ObjectSnapshot) +func (t *ObjectTree) DeleteWithOption(object client.Object, options ...ObjectOption) error { + name, err := model.GetGVKName(object) + if err != nil { + return err + } + delete(t.children, *name) + if len(options) > 0 { + option := ObjectOptions{} + for _, opt := range options { + opt.ApplyToObject(&option) + } + t.childrenOptions[*name] = option + } + return nil } func (t *ObjectTree) SetFinalizer(finalizer string) { diff --git a/pkg/controller/kubebuilderx/reconciler_test.go b/pkg/controller/kubebuilderx/reconciler_test.go index 242c1ba06bf..7ab374ba14d 100644 --- a/pkg/controller/kubebuilderx/reconciler_test.go +++ b/pkg/controller/kubebuilderx/reconciler_test.go @@ -67,8 +67,6 @@ var _ = Describe("reconciler test", func() { Expect(tree.Update(obj0Update)).Should(Succeed()) 
Expect(tree.List(&corev1.Pod{})[0]).Should(Equal(obj0Update)) Expect(tree.GetSecondaryObjects()).Should(HaveLen(1)) - tree.DeleteSecondaryObjects() - Expect(tree.GetSecondaryObjects()).Should(HaveLen(0)) By("DeepCopy") tree.SetRoot(root) diff --git a/pkg/controller/lifecycle/kbagent.go b/pkg/controller/lifecycle/kbagent.go index 45147eb63eb..043194a5147 100644 --- a/pkg/controller/lifecycle/kbagent.go +++ b/pkg/controller/lifecycle/kbagent.go @@ -25,7 +25,6 @@ import ( "math/rand" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -33,8 +32,6 @@ import ( appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" "github.com/apecloud/kubeblocks/pkg/constant" - intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" - kbagt "github.com/apecloud/kubeblocks/pkg/kbagent" kbacli "github.com/apecloud/kubeblocks/pkg/kbagent/client" "github.com/apecloud/kubeblocks/pkg/kbagent/proto" ) @@ -50,8 +47,8 @@ type kbagent struct { compName string lifecycleActions *appsv1.ComponentLifecycleActions templateVars map[string]string - pods []*corev1.Pod - pod *corev1.Pod + replicas []Replica + replica Replica } var _ Lifecycle = &kbagent{} @@ -81,13 +78,12 @@ func (a *kbagent) RoleProbe(ctx context.Context, cli client.Reader, opts *Option } func (a *kbagent) Switchover(ctx context.Context, cli client.Reader, opts *Options, candidate string) error { - roleName := a.pod.Labels[constant.RoleLabelKey] lfa := &switchover{ namespace: a.namespace, clusterName: a.clusterName, compName: a.compName, - role: roleName, - currentPod: a.pod.Name, + role: a.replica.Role(), + currentPod: a.replica.Name(), candidatePod: candidate, } return a.ignoreOutput(a.checkedCallAction(ctx, cli, a.lifecycleActions.Switchover, lfa, opts)) @@ -98,7 +94,7 @@ func (a *kbagent) MemberJoin(ctx context.Context, cli client.Reader, opts *Optio namespace: 
a.namespace, clusterName: a.clusterName, compName: a.compName, - pod: a.pod, + podName: a.replica.Name(), } return a.ignoreOutput(a.checkedCallAction(ctx, cli, a.lifecycleActions.MemberJoin, lfa, opts)) } @@ -108,7 +104,7 @@ func (a *kbagent) MemberLeave(ctx context.Context, cli client.Reader, opts *Opti namespace: a.namespace, clusterName: a.clusterName, compName: a.compName, - pod: a.pod, + podName: a.replica.Name(), } return a.ignoreOutput(a.checkedCallAction(ctx, cli, a.lifecycleActions.MemberLeave, lfa, opts)) } @@ -279,11 +275,11 @@ func (a *kbagent) templateVarsParameters() (map[string]string, error) { } func (a *kbagent) callActionWithSelector(ctx context.Context, spec *appsv1.Action, lfa lifecycleAction, req *proto.ActionRequest) ([]byte, error) { - pods, err := a.selectTargetPods(spec) + replicas, err := a.selectTargetPods(spec) if err != nil { return nil, err } - if len(pods) == 0 { + if len(replicas) == 0 { return nil, fmt.Errorf("no available pod to execute action %s", lfa.name()) } @@ -291,11 +287,11 @@ func (a *kbagent) callActionWithSelector(ctx context.Context, spec *appsv1.Actio // - back-off to retry // - timeout var output []byte - for _, pod := range pods { + for _, replica := range replicas { endpoint := func() (string, int32, error) { - host, port, err := a.serverEndpoint(pod) + host, port, err := a.serverEndpoint(replica) if err != nil { - return "", 0, errors.Wrapf(err, "pod %s is unavailable to execute action %s", pod.Name, lfa.name()) + return "", 0, errors.Wrapf(err, "pod %s is unavailable to execute action %s", replica.Name(), lfa.name()) } return host, port, nil } @@ -304,7 +300,7 @@ func (a *kbagent) callActionWithSelector(ctx context.Context, spec *appsv1.Actio if err != nil { // If kb is not run in a k8s cluster, using pod ip to call kb-agent would fail. // So we use a client that utilizes k8s' portforward ability. 
- cli, err = kbacli.NewPortForwardClient(pod, endpoint) + cli, err = kbacli.NewPortForwardClient(replica.Namespace(), replica.Name(), endpoint) } else { cli, err = kbacli.NewClient(endpoint) } @@ -319,7 +315,7 @@ func (a *kbagent) callActionWithSelector(ctx context.Context, spec *appsv1.Actio _ = cli.Close() if err != nil { - return nil, errors.Wrapf(err, "http error occurred when executing action %s at pod %s", lfa.name(), pod.Name) + return nil, errors.Wrapf(err, "http error occurred when executing action %s at pod %s", lfa.name(), replica.Name()) } if len(rsp.Error) > 0 { return nil, a.formatError(lfa, rsp) @@ -332,19 +328,18 @@ func (a *kbagent) callActionWithSelector(ctx context.Context, spec *appsv1.Actio return output, nil } -func (a *kbagent) selectTargetPods(spec *appsv1.Action) ([]*corev1.Pod, error) { - return SelectTargetPods(a.pods, a.pod, spec) +func (a *kbagent) selectTargetPods(spec *appsv1.Action) ([]Replica, error) { + return SelectTargetPods(a.replicas, a.replica, spec) } -func (a *kbagent) serverEndpoint(pod *corev1.Pod) (string, int32, error) { - port, err := intctrlutil.GetPortByName(*pod, kbagt.ContainerName, kbagt.DefaultHTTPPortName) +func (a *kbagent) serverEndpoint(replica Replica) (string, int32, error) { + host, port, err := replica.Endpoint() if err != nil { // has no kb-agent defined return "", 0, nil } - host := pod.Status.PodIP if host == "" { - return "", 0, fmt.Errorf("pod %v has no ip", pod.Name) + return "", 0, fmt.Errorf("pod %v has no ip", replica.Name()) } return host, port, nil } @@ -380,7 +375,7 @@ func (a *kbagent) formatError(lfa lifecycleAction, rsp proto.ActionResponse) err } } -func SelectTargetPods(pods []*corev1.Pod, pod *corev1.Pod, spec *appsv1.Action) ([]*corev1.Pod, error) { +func SelectTargetPods(replicas []Replica, replica Replica, spec *appsv1.Action) ([]Replica, error) { selector := spec.TargetPodSelector matchingKey := spec.MatchingKey if len(selector) == 0 && spec.Exec != nil && 
len(spec.Exec.TargetPodSelector) > 0 { @@ -389,26 +384,24 @@ func SelectTargetPods(pods []*corev1.Pod, pod *corev1.Pod, spec *appsv1.Action) matchingKey = spec.Exec.MatchingKey } if len(selector) == 0 { - return []*corev1.Pod{pod}, nil + return []Replica{replica}, nil } - anyPod := func() []*corev1.Pod { - i := rand.Int() % len(pods) - return []*corev1.Pod{pods[i]} + anyPod := func() []Replica { + i := rand.Int() % len(replicas) + return []Replica{replicas[i]} } - allPods := func() []*corev1.Pod { - return pods + allPods := func() []Replica { + return replicas } - podsWithRole := func() []*corev1.Pod { + podsWithRole := func() []Replica { roleName := matchingKey - var rolePods []*corev1.Pod - for i, pod := range pods { - if len(pod.Labels) != 0 { - if pod.Labels[constant.RoleLabelKey] == roleName { - rolePods = append(rolePods, pods[i]) - } + var rolePods []Replica + for i, r := range replicas { + if r.Role() == roleName { + rolePods = append(rolePods, replicas[i]) } } return rolePods diff --git a/pkg/controller/lifecycle/lfa_member.go b/pkg/controller/lifecycle/lfa_member.go index fa035ebf451..857cbc508e9 100644 --- a/pkg/controller/lifecycle/lfa_member.go +++ b/pkg/controller/lifecycle/lfa_member.go @@ -22,7 +22,6 @@ package lifecycle import ( "context" - corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/apecloud/kubeblocks/pkg/constant" @@ -86,7 +85,7 @@ type memberJoin struct { namespace string clusterName string compName string - pod *corev1.Pod + podName string } var _ lifecycleAction = &memberJoin{} @@ -102,8 +101,8 @@ func (a *memberJoin) parameters(ctx context.Context, cli client.Reader) (map[str // - KB_JOIN_MEMBER_POD_NAME: The pod name of the replica being added to the group. 
compName := constant.GenerateClusterComponentName(a.clusterName, a.compName) return map[string]string{ - joinMemberPodFQDNVar: intctrlutil.PodFQDN(a.namespace, compName, a.pod.Name), - joinMemberPodNameVar: a.pod.Name, + joinMemberPodFQDNVar: intctrlutil.PodFQDN(a.namespace, compName, a.podName), + joinMemberPodNameVar: a.podName, }, nil } @@ -111,7 +110,7 @@ type memberLeave struct { namespace string clusterName string compName string - pod *corev1.Pod + podName string } var _ lifecycleAction = &memberLeave{} @@ -127,7 +126,7 @@ func (a *memberLeave) parameters(ctx context.Context, cli client.Reader) (map[st // - KB_LEAVE_MEMBER_POD_NAME: The pod name of the replica being removed from the group. compName := constant.GenerateClusterComponentName(a.clusterName, a.compName) return map[string]string{ - leaveMemberPodFQDNVar: intctrlutil.PodFQDN(a.namespace, compName, a.pod.Name), - leaveMemberPodNameVar: a.pod.Name, + leaveMemberPodFQDNVar: intctrlutil.PodFQDN(a.namespace, compName, a.podName), + leaveMemberPodNameVar: a.podName, }, nil } diff --git a/pkg/controller/lifecycle/lifecycle.go b/pkg/controller/lifecycle/lifecycle.go index 79f80f3874e..40c0cabce8b 100644 --- a/pkg/controller/lifecycle/lifecycle.go +++ b/pkg/controller/lifecycle/lifecycle.go @@ -23,7 +23,6 @@ import ( "context" "fmt" - corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" @@ -59,16 +58,28 @@ type Lifecycle interface { UserDefined(ctx context.Context, cli client.Reader, opts *Options, name string, action *appsv1.Action, args map[string]string) error } +type Replica interface { + Namespace() string + Name() string + Role() string + + // Endpoint returns the host (e.g. IP) and port (e.g. HTTP port) to access the replica from the controller. + Endpoint() (string, int32, error) + + // StreamingEndpoint returns the host (e.g. IP) and port (e.g. streaming port) to stream data from the replica. 
+ StreamingEndpoint() (string, int32, error) +} + func New(namespace, clusterName, compName string, lifecycleActions *appsv1.ComponentLifecycleActions, - templateVars map[string]string, pod *corev1.Pod, pods ...*corev1.Pod) (Lifecycle, error) { - if pod == nil && len(pods) == 0 { + templateVars map[string]string, replica Replica, replicas ...Replica) (Lifecycle, error) { + if replica == nil && len(replicas) == 0 { return nil, fmt.Errorf("either pod or pods must be provided to call lifecycle actions") } - if pod == nil { - pod = pods[0] + if replica == nil { + replica = replicas[0] } - if len(pods) == 0 { - pods = []*corev1.Pod{pod} + if len(replicas) == 0 { + replicas = []Replica{replica} } return &kbagent{ namespace: namespace, @@ -76,7 +87,7 @@ func New(namespace, clusterName, compName string, lifecycleActions *appsv1.Compo compName: compName, lifecycleActions: lifecycleActions, templateVars: templateVars, - pods: pods, - pod: pod, + replicas: replicas, + replica: replica, }, nil } diff --git a/pkg/controller/lifecycle/lifecycle_test.go b/pkg/controller/lifecycle/lifecycle_test.go index 7d14e58e8d6..9a1fa431469 100644 --- a/pkg/controller/lifecycle/lifecycle_test.go +++ b/pkg/controller/lifecycle/lifecycle_test.go @@ -40,6 +40,30 @@ import ( "github.com/apecloud/kubeblocks/pkg/kbagent/proto" ) +type lifecycleReplica struct { + corev1.Pod +} + +func (r *lifecycleReplica) Namespace() string { + return r.ObjectMeta.Namespace +} + +func (r *lifecycleReplica) Name() string { + return r.ObjectMeta.Name +} + +func (r *lifecycleReplica) Role() string { + return r.ObjectMeta.Labels[constant.RoleLabelKey] +} + +func (r *lifecycleReplica) Endpoint() (string, int32, error) { + return r.Status.PodIP, 3306, nil +} + +func (r *lifecycleReplica) StreamingEndpoint() (string, int32, error) { + return "", 0, fmt.Errorf("NotSupported") +} + type mockReader struct { cli client.Reader objs []client.Object @@ -96,7 +120,7 @@ var _ = Describe("lifecycle", func() { clusterName string 
compName string lifecycleActions *appsv1.ComponentLifecycleActions - pods []*corev1.Pod + pods []Replica ) cleanEnv := func() { @@ -137,7 +161,7 @@ var _ = Describe("lifecycle", func() { FailureThreshold: 3, }, } - pods = []*corev1.Pod{{}} + pods = []Replica{&lifecycleReplica{}} }) AfterEach(func() { @@ -164,9 +188,9 @@ var _ = Describe("lifecycle", func() { Expect(agent.clusterName).Should(Equal(clusterName)) Expect(agent.compName).Should(Equal(compName)) Expect(agent.lifecycleActions).Should(Equal(lifecycleActions)) - Expect(agent.pod).Should(Equal(pod)) - Expect(agent.pods).Should(HaveLen(1)) - Expect(agent.pods[0]).Should(Equal(pod)) + Expect(agent.replica).Should(Equal(pod)) + Expect(agent.replicas).Should(HaveLen(1)) + Expect(agent.replicas[0]).Should(Equal(pod)) }) It("pods", func() { @@ -180,9 +204,9 @@ var _ = Describe("lifecycle", func() { Expect(agent.clusterName).Should(Equal(clusterName)) Expect(agent.compName).Should(Equal(compName)) Expect(agent.lifecycleActions).Should(Equal(lifecycleActions)) - Expect(agent.pod).Should(Equal(pod)) - Expect(agent.pods).Should(HaveLen(1)) - Expect(agent.pods[0]).Should(Equal(pod)) + Expect(agent.replica).Should(Equal(pod)) + Expect(agent.replicas).Should(HaveLen(1)) + Expect(agent.replicas[0]).Should(Equal(pod)) }) }) @@ -477,37 +501,41 @@ var _ = Describe("lifecycle", func() { It("pod selector - any", func() { lifecycleActions.PostProvision.Exec.TargetPodSelector = appsv1.AnyReplica - pods = []*corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "pod-0", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "kbagent", - Ports: []corev1.ContainerPort{ - { - Name: "http", + pods = []Replica{ + &lifecycleReplica{ + Pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "pod-0", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "kbagent", + Ports: []corev1.ContainerPort{ + { + Name: "http", + }, }, }, }, }, }, }, - { - 
ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "pod-1", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "kbagent", - Ports: []corev1.ContainerPort{ - { - Name: "http", + &lifecycleReplica{ + Pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "pod-1", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "kbagent", + Ports: []corev1.ContainerPort{ + { + Name: "http", + }, }, }, }, @@ -532,43 +560,48 @@ var _ = Describe("lifecycle", func() { It("pod selector - role", func() { lifecycleActions.PostProvision.Exec.TargetPodSelector = appsv1.RoleSelector lifecycleActions.PostProvision.Exec.MatchingKey = "leader" - pods = []*corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "pod-0", - Labels: map[string]string{ - constant.RoleLabelKey: "follower", + pods = []Replica{ + &lifecycleReplica{ + Pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "pod-0", + Labels: map[string]string{ + constant.RoleLabelKey: "follower", + }, }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "kbagent", - Ports: []corev1.ContainerPort{ - { - Name: "http", + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "kbagent", + Ports: []corev1.ContainerPort{ + { + Name: "http", + }, }, }, }, }, }, }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "pod-1", - Labels: map[string]string{ - constant.RoleLabelKey: "leader", + &lifecycleReplica{ + Pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "pod-1", + Labels: map[string]string{ + constant.RoleLabelKey: "leader", + }, }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "kbagent", - Ports: []corev1.ContainerPort{ - { - Name: "http", + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "kbagent", + Ports: []corev1.ContainerPort{ + { + Name: "http", + }, }, }, }, 
@@ -589,22 +622,26 @@ var _ = Describe("lifecycle", func() { It("pod selector - has no matched", func() { lifecycleActions.PostProvision.Exec.TargetPodSelector = appsv1.RoleSelector lifecycleActions.PostProvision.Exec.MatchingKey = "leader" - pods = []*corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "pod-0", - Labels: map[string]string{ - constant.RoleLabelKey: "follower", + pods = []Replica{ + &lifecycleReplica{ + Pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "pod-0", + Labels: map[string]string{ + constant.RoleLabelKey: "follower", + }, }, }, }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "pod-1", - Labels: map[string]string{ - constant.RoleLabelKey: "follower", + &lifecycleReplica{ + Pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "pod-1", + Labels: map[string]string{ + constant.RoleLabelKey: "follower", + }, }, }, }, diff --git a/pkg/controller/model/graph_options.go b/pkg/controller/model/graph_options.go index 46ac5666a82..8fc70bc174a 100644 --- a/pkg/controller/model/graph_options.go +++ b/pkg/controller/model/graph_options.go @@ -19,7 +19,9 @@ along with this program. If not, see . 
package model -import "sigs.k8s.io/controller-runtime/pkg/client" +import ( + "sigs.k8s.io/controller-runtime/pkg/client" +) type GraphOptions struct { replaceIfExisting bool @@ -27,6 +29,8 @@ type GraphOptions struct { clientOpt any propagationPolicy client.PropagationPolicy subResource string + prevHooks []func(client.Object) error + postHooks []func(client.Object) error } type GraphOption interface { @@ -99,3 +103,31 @@ func WithSubResource(subResource string) GraphOption { subResource: subResource, } } + +type hookOption struct { + prevHook func(object client.Object) error + postHook func(object client.Object) error +} + +var _ GraphOption = &hookOption{} + +func (o *hookOption) ApplyTo(opts *GraphOptions) { + if o.prevHook != nil { + opts.prevHooks = append(opts.prevHooks, o.prevHook) + } + if o.postHook != nil { + opts.postHooks = append(opts.postHooks, o.postHook) + } +} + +func WithPrevHook(hook func(client.Object) error) GraphOption { + return &hookOption{ + prevHook: hook, + } +} + +func WithPostHook(hook func(client.Object) error) GraphOption { + return &hookOption{ + postHook: hook, + } +} diff --git a/pkg/controller/model/transform_types.go b/pkg/controller/model/transform_types.go index ad21a2a4a44..33d93c59013 100644 --- a/pkg/controller/model/transform_types.go +++ b/pkg/controller/model/transform_types.go @@ -72,6 +72,8 @@ type ObjectVertex struct { SubResource string ClientOpt any PropagationPolicy client.PropagationPolicy + PrevHooks []func(client.Object) error + PostHooks []func(client.Object) error } func (v *ObjectVertex) String() string { @@ -92,6 +94,8 @@ func NewObjectVertex(oldObj, newObj client.Object, action *Action, opts ...Graph Action: action, SubResource: graphOpts.subResource, ClientOpt: graphOpts.clientOpt, + PrevHooks: graphOpts.prevHooks, + PostHooks: graphOpts.postHooks, } } diff --git a/pkg/kbagent/client/portforward_client.go b/pkg/kbagent/client/portforward_client.go index d7fb0c5f6c5..25a9196f1d8 100644 --- 
a/pkg/kbagent/client/portforward_client.go +++ b/pkg/kbagent/client/portforward_client.go @@ -27,7 +27,6 @@ import ( "net/url" "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -39,10 +38,11 @@ import ( ) type portForwardClient struct { - pod *corev1.Pod - port string - config *rest.Config - logger logr.Logger + namespace string + podName string + port string + config *rest.Config + logger logr.Logger } var _ Client = &portForwardClient{} @@ -134,8 +134,8 @@ func (pf *portForwardClient) newPortForwarder(readyCh, stopCh chan struct{}, out } req := clientset.CoreV1().RESTClient().Post(). Resource("pods"). - Namespace(pf.pod.Namespace). - Name(pf.pod.Name). + Namespace(pf.namespace). + Name(pf.podName). SubResource("portforward") dialer, err := pf.createDialer("POST", req.URL(), pf.config) if err != nil { @@ -149,7 +149,7 @@ func (pf *portForwardClient) newPortForwarder(readyCh, stopCh chan struct{}, out return fw, nil } -func NewPortForwardClient(pod *corev1.Pod, endpoint func() (string, int32, error)) (Client, error) { +func NewPortForwardClient(namespace, podName string, endpoint func() (string, int32, error)) (Client, error) { if mockClient != nil || mockClientError != nil { return mockClient, mockClientError } @@ -161,9 +161,10 @@ func NewPortForwardClient(pod *corev1.Pod, endpoint func() (string, int32, error config := ctrl.GetConfigOrDie() return &portForwardClient{ - pod: pod, - port: fmt.Sprint(port), - config: config, - logger: ctrl.Log.WithName("portforward"), + namespace: namespace, + podName: podName, + port: fmt.Sprint(port), + config: config, + logger: ctrl.Log.WithName("portforward"), }, nil } diff --git a/pkg/kbagent/service/task.go b/pkg/kbagent/service/task.go index 063e3ec5030..9254eee344a 100644 --- a/pkg/kbagent/service/task.go +++ b/pkg/kbagent/service/task.go @@ -156,7 +156,7 @@ func (s *taskService) wait(ch chan error) error { func (s 
*taskService) notify(task proto.Task, event proto.TaskEvent, sync bool) error { msg, err := json.Marshal(&event) if err == nil { - return util.SendEventWithMessage(&s.logger, "task", string(msg), sync) + return util.SendEventWithMessage(&s.logger, event.Task, string(msg), sync) } else { s.logger.Error(err, fmt.Sprintf("failed to marshal task event, task: %v", task)) return err diff --git a/pkg/operations/custom/utils.go b/pkg/operations/custom/utils.go index 9260a929e19..4ae4ddd2f90 100644 --- a/pkg/operations/custom/utils.go +++ b/pkg/operations/custom/utils.go @@ -23,6 +23,8 @@ import ( "bytes" "context" "fmt" + "reflect" + "slices" "sort" "strconv" "strings" @@ -36,8 +38,10 @@ import ( "github.com/apecloud/kubeblocks/pkg/common" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/component" + "github.com/apecloud/kubeblocks/pkg/controller/multicluster" "github.com/apecloud/kubeblocks/pkg/controller/scheduling" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" + "github.com/apecloud/kubeblocks/pkg/generics" ) const ( @@ -375,10 +379,11 @@ func getTargetPods( pods = append(pods, &podList.Items[i]) } } else { - if podSelector.Role != "" { - pods, err = component.ListOwnedPodsWithRole(ctx, cli, cluster.Namespace, cluster.Name, compName, podSelector.Role) - } else { - pods, err = component.ListOwnedPods(ctx, cli, cluster.Namespace, cluster.Name, compName) + pods, err = listCompPods(ctx, cli, cluster.Namespace, cluster.Name, compName) + if podSelector.Role != "" && err == nil { + pods = slices.DeleteFunc(pods, func(pod *corev1.Pod) bool { + return pod.Labels[constant.RoleLabelKey] != podSelector.Role + }) } } if err != nil { @@ -435,3 +440,33 @@ func getTolerations(cluster *appsv1.Cluster, compSpec *appsv1.ClusterComponentSp } return schedulePolicy.Tolerations, nil } + +func listCompPods(ctx context.Context, cli client.Reader, namespace, clusterName, compName string, + opts ...client.ListOption) ([]*corev1.Pod, 
error) { + labels := constant.GetCompLabels(clusterName, compName) + if opts == nil { + opts = make([]client.ListOption, 0) + } + opts = append(opts, multicluster.InDataContext()) // TODO: pod + return listObjWithLabelsInNamespace(ctx, cli, generics.PodSignature, namespace, labels, opts...) +} + +func listObjWithLabelsInNamespace[T generics.Object, PT generics.PObject[T], L generics.ObjList[T], PL generics.PObjList[T, L]]( + ctx context.Context, cli client.Reader, _ func(T, PT, L, PL), namespace string, labels client.MatchingLabels, opts ...client.ListOption) ([]PT, error) { + if opts == nil { + opts = make([]client.ListOption, 0) + } + opts = append(opts, []client.ListOption{labels, client.InNamespace(namespace)}...) + + var objList L + if err := cli.List(ctx, PL(&objList), opts...); err != nil { + return nil, err + } + + objs := make([]PT, 0) + items := reflect.ValueOf(&objList).Elem().FieldByName("Items").Interface().([]T) + for i := range items { + objs = append(objs, &items[i]) + } + return objs, nil +} diff --git a/pkg/operations/ops_progress_util.go b/pkg/operations/ops_progress_util.go index 90a76eb3f92..426abfefd26 100644 --- a/pkg/operations/ops_progress_util.go +++ b/pkg/operations/ops_progress_util.go @@ -177,7 +177,7 @@ func handleComponentStatusProgress( if clusterComponent == nil { return 0, 0, nil } - if pods, err = intctrlcomp.ListOwnedPods(reqCtx.Ctx, cli, opsRes.Cluster.Namespace, opsRes.Cluster.Name, pgRes.fullComponentName); err != nil { + if pods, err = listCompPods(reqCtx.Ctx, cli, opsRes.Cluster.Namespace, opsRes.Cluster.Name, pgRes.fullComponentName); err != nil { return 0, completedCount, err } expectReplicas := clusterComponent.Replicas diff --git a/pkg/operations/ops_util.go b/pkg/operations/ops_util.go index 280f973beca..7eaada78c57 100644 --- a/pkg/operations/ops_util.go +++ b/pkg/operations/ops_util.go @@ -22,6 +22,7 @@ package operations import ( "context" "fmt" + "reflect" "slices" "time" @@ -33,7 +34,9 @@ import ( appsv1 
"github.com/apecloud/kubeblocks/apis/apps/v1" opsv1alpha1 "github.com/apecloud/kubeblocks/apis/operations/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/multicluster" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" + "github.com/apecloud/kubeblocks/pkg/generics" opsutil "github.com/apecloud/kubeblocks/pkg/operations/util" ) @@ -319,3 +322,33 @@ func getComponentSpecOrShardingTemplate(cluster *appsv1.Cluster, componentName s } return nil } + +func listCompPods(ctx context.Context, cli client.Reader, namespace, clusterName, compName string, + opts ...client.ListOption) ([]*corev1.Pod, error) { + labels := constant.GetCompLabels(clusterName, compName) + if opts == nil { + opts = make([]client.ListOption, 0) + } + opts = append(opts, multicluster.InDataContext()) // TODO: pod + return listObjWithLabelsInNamespace(ctx, cli, generics.PodSignature, namespace, labels, opts...) +} + +func listObjWithLabelsInNamespace[T generics.Object, PT generics.PObject[T], L generics.ObjList[T], PL generics.PObjList[T, L]]( + ctx context.Context, cli client.Reader, _ func(T, PT, L, PL), namespace string, labels client.MatchingLabels, opts ...client.ListOption) ([]PT, error) { + if opts == nil { + opts = make([]client.ListOption, 0) + } + opts = append(opts, []client.ListOption{labels, client.InNamespace(namespace)}...) 
+ + var objList L + if err := cli.List(ctx, PL(&objList), opts...); err != nil { + return nil, err + } + + objs := make([]PT, 0) + items := reflect.ValueOf(&objList).Elem().FieldByName("Items").Interface().([]T) + for i := range items { + objs = append(objs, &items[i]) + } + return objs, nil +} diff --git a/pkg/operations/rebuild_instance.go b/pkg/operations/rebuild_instance.go index 6ee2176bfec..178b8345f82 100644 --- a/pkg/operations/rebuild_instance.go +++ b/pkg/operations/rebuild_instance.go @@ -125,7 +125,7 @@ func (r rebuildInstanceOpsHandler) validateRebuildInstanceWithHScale(reqCtx intc synthesizedComp *component.SynthesizedComponent, instanceNames []string) error { // rebuild instance by horizontal scaling - pods, err := component.ListOwnedPods(reqCtx.Ctx, cli, opsRes.Cluster.Namespace, opsRes.Cluster.Name, synthesizedComp.Name) + pods, err := listCompPods(reqCtx.Ctx, cli, opsRes.Cluster.Namespace, opsRes.Cluster.Name, synthesizedComp.Name) if err != nil { return err } diff --git a/pkg/operations/suite_test.go b/pkg/operations/suite_test.go index bf01631cb50..6efe70c80c5 100644 --- a/pkg/operations/suite_test.go +++ b/pkg/operations/suite_test.go @@ -46,7 +46,6 @@ import ( parametersv1alpha1 "github.com/apecloud/kubeblocks/apis/parameters/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" "github.com/apecloud/kubeblocks/pkg/constant" - intctrlcomp "github.com/apecloud/kubeblocks/pkg/controller/component" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" "github.com/apecloud/kubeblocks/pkg/testutil" testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" @@ -255,7 +254,7 @@ func initOperationsResourcesWithTopology(clusterDefName, compDefName, clusterNam func initInstanceSetPods(ctx context.Context, cli client.Client, opsRes *OpsResource) []*corev1.Pod { // mock the pods of consensusSet component testapps.MockInstanceSetPods(&testCtx, nil, opsRes.Cluster, defaultCompName) - pods, err := intctrlcomp.ListOwnedPods(ctx, 
cli, opsRes.Cluster.Namespace, opsRes.Cluster.Name, defaultCompName) + pods, err := listCompPods(ctx, cli, opsRes.Cluster.Namespace, opsRes.Cluster.Name, defaultCompName) Expect(err).Should(Succeed()) // the opsRequest will use startTime to check some condition. // if there is no sleep for 1 second, unstable error may occur. diff --git a/pkg/operations/switchover.go b/pkg/operations/switchover.go index 4b6e085337b..db4915e8826 100644 --- a/pkg/operations/switchover.go +++ b/pkg/operations/switchover.go @@ -40,6 +40,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/lifecycle" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" + "github.com/apecloud/kubeblocks/pkg/kbagent" ) // switchover constants @@ -271,7 +272,7 @@ func handleSwitchover(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRes * // We consider a switchover action succeeds if the action returns without error. We don't need to know if a switchover is actually executed. func doSwitchover(ctx context.Context, cli client.Reader, synthesizedComp *component.SynthesizedComponent, switchover *opsv1alpha1.Switchover) error { - pods, err := component.ListOwnedPods(ctx, cli, synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name) + pods, err := listCompPods(ctx, cli, synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name) if err != nil { return err } @@ -284,8 +285,7 @@ func doSwitchover(ctx context.Context, cli client.Reader, synthesizedComp *compo } } - lfa, err := lifecycle.New(synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name, - synthesizedComp.LifecycleActions, synthesizedComp.TemplateVars, pod, pods...) 
+ lfa, err := newLifecycleAction(synthesizedComp, pods, pod) if err != nil { return err } @@ -380,3 +380,42 @@ func handleProgressDetail( } setComponentSwitchoverProgressDetails(reqCtx.Recorder, opsRequest, appsv1.UpdatingComponentPhase, *progressDetail, compName) } + +func newLifecycleAction(synthesizedComp *component.SynthesizedComponent, pods []*corev1.Pod, pod *corev1.Pod) (lifecycle.Lifecycle, error) { + var ( + replica = &lifecycleReplica{ + Pod: *pod, + } + replicas []lifecycle.Replica + ) + for i := range pods { + replicas = append(replicas, &lifecycleReplica{Pod: *pods[i]}) + } + return lifecycle.New(synthesizedComp.Namespace, synthesizedComp.ClusterName, synthesizedComp.Name, + synthesizedComp.LifecycleActions, synthesizedComp.TemplateVars, replica, replicas...) +} + +type lifecycleReplica struct { + corev1.Pod +} + +func (r *lifecycleReplica) Namespace() string { + return r.ObjectMeta.Namespace +} + +func (r *lifecycleReplica) Name() string { + return r.ObjectMeta.Name +} + +func (r *lifecycleReplica) Role() string { + return r.ObjectMeta.Labels[constant.RoleLabelKey] +} + +func (r *lifecycleReplica) Endpoint() (string, int32, error) { + port, err := intctrlutil.GetPortByName(r.Pod, kbagent.ContainerName, kbagent.DefaultHTTPPortName) + return r.Status.PodIP, port, err +} + +func (r *lifecycleReplica) StreamingEndpoint() (string, int32, error) { + return "", 0, fmt.Errorf("NotSupported") +} diff --git a/pkg/testutil/apps/instance_set_factoy.go b/pkg/testutil/apps/instance_set_factoy.go index 5d651911a28..5f1422cc1d0 100644 --- a/pkg/testutil/apps/instance_set_factoy.go +++ b/pkg/testutil/apps/instance_set_factoy.go @@ -140,3 +140,30 @@ func (factory *MockInstanceSetFactory) SetEnableInstanceAPI(enable *bool) *MockI factory.Get().Spec.EnableInstanceAPI = enable return factory } + +func (factory *MockInstanceSetFactory) SetInstanceAssistantObjects(objs []corev1.ObjectReference) *MockInstanceSetFactory { + factory.Get().Spec.InstanceAssistantObjects = objs + 
return factory +} + +func (factory *MockInstanceSetFactory) SetLifecycleActions(lifecycleActions *kbappsv1.ComponentLifecycleActions, templateVars map[string]string) *MockInstanceSetFactory { + if lifecycleActions != nil || templateVars != nil { + if factory.Get().Spec.LifecycleActions == nil { + factory.Get().Spec.LifecycleActions = &workloads.LifecycleActions{} + } + } + if lifecycleActions != nil { + factory.Get().Spec.LifecycleActions.Switchover = lifecycleActions.Switchover + factory.Get().Spec.LifecycleActions.MemberJoin = lifecycleActions.MemberJoin + factory.Get().Spec.LifecycleActions.MemberLeave = lifecycleActions.MemberLeave + factory.Get().Spec.LifecycleActions.DataLoad = lifecycleActions.DataLoad + factory.Get().Spec.LifecycleActions.Reconfigure = lifecycleActions.Reconfigure + } + if templateVars != nil { + factory.Get().Spec.LifecycleActions.TemplateVars = make(map[string]string) + for k, v := range templateVars { + factory.Get().Spec.LifecycleActions.TemplateVars[k] = v + } + } + return factory +} diff --git a/pkg/testutil/k8s/instance_set_util.go b/pkg/testutil/k8s/instance_set_util.go index 7f5116ea6df..470fcea73c9 100644 --- a/pkg/testutil/k8s/instance_set_util.go +++ b/pkg/testutil/k8s/instance_set_util.go @@ -25,6 +25,7 @@ import ( "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1" @@ -34,8 +35,8 @@ import ( // MockInstanceSetReady mocks the ITS workload to ready state. 
func MockInstanceSetReady(its *workloads.InstanceSet, pods ...*corev1.Pod) { - its.Status.InitReplicas = *its.Spec.Replicas - its.Status.ReadyInitReplicas = *its.Spec.Replicas + its.Status.InitReplicas = ptr.To(ptr.Deref(its.Spec.Replicas, 0)) + its.Status.ReadyInitReplicas = ptr.To(ptr.Deref(its.Spec.Replicas, 0)) its.Status.AvailableReplicas = *its.Spec.Replicas its.Status.ObservedGeneration = its.Generation its.Status.Replicas = *its.Spec.Replicas @@ -53,14 +54,19 @@ func MockInstanceSetReady(its *workloads.InstanceSet, pods ...*corev1.Pod) { var instanceStatus []workloads.InstanceStatus roleMap := composeRoleMap(*its) for _, pod := range pods { + var role workloads.ReplicaRole + ok := false roleName := strings.ToLower(pod.Labels[constant.RoleLabelKey]) - role, ok := roleMap[roleName] - if !ok { - continue + if len(roleName) > 0 { + role, ok = roleMap[roleName] + if !ok { + continue + } } status := workloads.InstanceStatus{ - PodName: pod.Name, - Role: role.Name, + PodName: pod.Name, + Role: role.Name, + Provisioned: true, } instanceStatus = append(instanceStatus, status) }