diff --git a/config/crds/migration.openshift.io_migplans.yaml b/config/crds/migration.openshift.io_migplans.yaml index 1e50f70d4..049355318 100644 --- a/config/crds/migration.openshift.io_migplans.yaml +++ b/config/crds/migration.openshift.io_migplans.yaml @@ -390,6 +390,8 @@ spec: type: string namespace: type: string + ownerType: + type: string volumeMode: description: PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. diff --git a/pkg/apis/migration/v1alpha1/migplan_types.go b/pkg/apis/migration/v1alpha1/migplan_types.go index 8b3dcefbb..471ed83c6 100644 --- a/pkg/apis/migration/v1alpha1/migplan_types.go +++ b/pkg/apis/migration/v1alpha1/migplan_types.go @@ -885,6 +885,20 @@ type PV struct { ProposedCapacity resource.Quantity `json:"proposedCapacity,omitempty"` } +type OwnerType string + +const ( + VirtualMachine OwnerType = "VirtualMachine" + Deployment OwnerType = "Deployment" + DeploymentConfig OwnerType = "DeploymentConfig" + StatefulSet OwnerType = "StatefulSet" + ReplicaSet OwnerType = "ReplicaSet" + DaemonSet OwnerType = "DaemonSet" + Job OwnerType = "Job" + CronJob OwnerType = "CronJob" + Unknown OwnerType = "Unknown" +) + // PVC type PVC struct { Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` @@ -892,6 +906,7 @@ type PVC struct { AccessModes []kapi.PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` VolumeMode kapi.PersistentVolumeMode `json:"volumeMode,omitempty"` HasReference bool `json:"hasReference,omitempty"` + OwnerType OwnerType `json:"ownerType,omitempty"` } // GetTargetName returns name of the target PVC diff --git a/pkg/controller/directvolumemigration/pvcs.go b/pkg/controller/directvolumemigration/pvcs.go index 6b78636c0..ca7879f26 100644 --- a/pkg/controller/directvolumemigration/pvcs.go +++ b/pkg/controller/directvolumemigration/pvcs.go @@ -116,6 +116,7 @@ func (t *Task) 
createDestinationDV(srcClient, destClient compat.Client, pvc miga } } destPVC.Spec.Resources.Requests[corev1.ResourceStorage] = size + destPVC.Spec.VolumeMode = pvc.TargetVolumeMode return createBlankDataVolumeFromPVC(destClient, destPVC) } diff --git a/pkg/controller/migmigration/migmigration_controller.go b/pkg/controller/migmigration/migmigration_controller.go index 037f54832..54b18cf9b 100644 --- a/pkg/controller/migmigration/migmigration_controller.go +++ b/pkg/controller/migmigration/migmigration_controller.go @@ -243,7 +243,7 @@ func (r *ReconcileMigMigration) Reconcile(ctx context.Context, request reconcile // Validate err = r.validate(ctx, migration) if err != nil { - log.Info("Validation failed, requeueing") + log.V(3).Info("Validation failed, requeueing") sink.Trace(err) return reconcile.Result{Requeue: true}, nil } diff --git a/pkg/controller/migplan/pvlist.go b/pkg/controller/migplan/pvlist.go index 1a9b124dd..1839079fa 100644 --- a/pkg/controller/migplan/pvlist.go +++ b/pkg/controller/migplan/pvlist.go @@ -14,7 +14,9 @@ import ( migpods "github.com/konveyor/mig-controller/pkg/pods" migref "github.com/konveyor/mig-controller/pkg/reference" "github.com/opentracing/opentracing-go" + appsv1 "k8s.io/api/apps/v1" core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" k8sclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -352,10 +354,14 @@ func isStorageConversionPlan(plan *migapi.MigPlan) bool { // Get a list of PVCs found within the specified namespaces. 
func (r *ReconcileMigPlan) getClaims(client compat.Client, plan *migapi.MigPlan) (Claims, error) { claims := Claims{} - list := &core.PersistentVolumeClaimList{} - err := client.List(context.TODO(), list, &k8sclient.ListOptions{}) - if err != nil { - return nil, liberr.Wrap(err) + pvcList := []core.PersistentVolumeClaim{} + for _, namespace := range plan.GetSourceNamespaces() { + list := &core.PersistentVolumeClaimList{} + err := client.List(context.TODO(), list, k8sclient.InNamespace(namespace)) + if err != nil { + return nil, liberr.Wrap(err) + } + pvcList = append(pvcList, list.Items...) } podList, err := migpods.ListTemplatePods(client, plan.GetSourceNamespaces()) @@ -363,19 +369,17 @@ func (r *ReconcileMigPlan) getClaims(client compat.Client, plan *migapi.MigPlan) return nil, liberr.Wrap(err) } - runningPods := &core.PodList{} - err = client.List(context.TODO(), runningPods, &k8sclient.ListOptions{}) - if err != nil { - return nil, liberr.Wrap(err) - } - - inNamespaces := func(objNamespace string, namespaces []string) bool { - for _, ns := range namespaces { - if ns == objNamespace { - return true + for _, namespace := range plan.GetSourceNamespaces() { + pods := &core.PodList{} + err = client.List(context.TODO(), pods, k8sclient.InNamespace(namespace)) + if err != nil { + return nil, liberr.Wrap(err) + } + for _, pod := range pods.Items { + if pod.Status.Phase == core.PodRunning { + podList = append(podList, pod) } } - return false } alreadyMigrated := func(pvc core.PersistentVolumeClaim) bool { @@ -398,17 +402,11 @@ func (r *ReconcileMigPlan) getClaims(client compat.Client, plan *migapi.MigPlan) isStorageConversionPlan := isStorageConversionPlan(plan) - for _, pod := range runningPods.Items { - if inNamespaces(pod.Namespace, plan.GetSourceNamespaces()) { - podList = append(podList, pod) - } + pvcToOwnerMap, err := r.createPVCToOwnerTypeMap(podList) + if err != nil { + return nil, liberr.Wrap(err) } - - for _, pvc := range list.Items { - if 
!inNamespaces(pvc.Namespace, plan.GetSourceNamespaces()) { - continue - } - + for _, pvc := range pvcList { if isStorageConversionPlan && (alreadyMigrated(pvc) || migrationSourceOtherPlan(pvc)) { continue } @@ -434,11 +432,71 @@ func (r *ReconcileMigPlan) getClaims(client compat.Client, plan *migapi.MigPlan) AccessModes: accessModes, VolumeMode: volumeMode, HasReference: pvcInPodVolumes(pvc, podList), + OwnerType: pvcToOwnerMap[pvc.Name], }) } return claims, nil } +func (r *ReconcileMigPlan) createPVCToOwnerTypeMap(podList []core.Pod) (map[string]migapi.OwnerType, error) { + pvcToOwnerMap := make(map[string]migapi.OwnerType) + for _, pod := range podList { + for _, vol := range pod.Spec.Volumes { + if vol.PersistentVolumeClaim != nil { + // Only check for owner references if there is a single owner and the volume wasn't set already. + ownerType, ok := pvcToOwnerMap[vol.PersistentVolumeClaim.ClaimName] + if pod.OwnerReferences != nil && len(pod.OwnerReferences) == 1 { + for _, owner := range pod.OwnerReferences { + newOwnerType := migapi.Unknown + if owner.Kind == "StatefulSet" && owner.APIVersion == "apps/v1" { + newOwnerType = migapi.StatefulSet + } else if owner.Kind == "ReplicaSet" && owner.APIVersion == "apps/v1" { + // Check if the owner is a Deployment + replicaSet := &appsv1.ReplicaSet{} + if owner.Name != "" { + err := r.Client.Get(context.TODO(), k8sclient.ObjectKey{ + Namespace: pod.Namespace, + Name: owner.Name, + }, replicaSet) + if err != nil && !errors.IsNotFound(err) { + return nil, err + } + } + if len(replicaSet.OwnerReferences) == 1 && replicaSet.OwnerReferences[0].Kind == "Deployment" && replicaSet.OwnerReferences[0].APIVersion == "apps/v1" { + newOwnerType = migapi.Deployment + } else { + newOwnerType = migapi.ReplicaSet + } + } else if owner.Kind == "Deployment" && owner.APIVersion == "apps/v1" { + newOwnerType = migapi.Deployment + } else if owner.Kind == "DaemonSet" && owner.APIVersion == "apps/v1" { + newOwnerType = migapi.DaemonSet + } else 
if owner.Kind == "Job" && owner.APIVersion == "batch/v1" { + newOwnerType = migapi.Job + } else if owner.Kind == "CronJob" && owner.APIVersion == "batch/v1" { + newOwnerType = migapi.CronJob + } else if owner.Kind == "VirtualMachineInstance" && (owner.APIVersion == "kubevirt.io/v1" || owner.APIVersion == "kubevirt.io/v1alpha3") { + newOwnerType = migapi.VirtualMachine + } else if owner.Kind == "Pod" && strings.HasPrefix(pod.Name, "hp-") { + newOwnerType = migapi.VirtualMachine + } else { + newOwnerType = migapi.Unknown + } + if !ok { + pvcToOwnerMap[vol.PersistentVolumeClaim.ClaimName] = newOwnerType + } else if ownerType != newOwnerType { + pvcToOwnerMap[vol.PersistentVolumeClaim.ClaimName] = migapi.Unknown + } + } + } else { + pvcToOwnerMap[vol.PersistentVolumeClaim.ClaimName] = migapi.Unknown + } + } + } + } + return pvcToOwnerMap, nil +} + // Determine the supported PV actions. func (r *ReconcileMigPlan) getSupportedActions(pv core.PersistentVolume, claim migapi.PVC) []string { supportedActions := []string{} diff --git a/pkg/controller/migplan/pvlist_test.go b/pkg/controller/migplan/pvlist_test.go index 0c7779ce1..f2359c485 100644 --- a/pkg/controller/migplan/pvlist_test.go +++ b/pkg/controller/migplan/pvlist_test.go @@ -6,8 +6,10 @@ import ( "testing" migapi "github.com/konveyor/mig-controller/pkg/apis/migration/v1alpha1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func Test_getStatefulSetVolumeName(t *testing.T) { @@ -449,3 +451,452 @@ func Test_getMappedNameForPVC(t *testing.T) { }) } } + +func Test_createPVCToOwnerTypeMap(t *testing.T) { + type args struct { + podList []corev1.Pod + migPlan ReconcileMigPlan + } + tests := []struct { + name string + args args + want map[string]migapi.OwnerType + wantErr bool + }{ + { + name: "empty podlist", + args: args{ + podList: []corev1.Pod{}, + migPlan: ReconcileMigPlan{}, + }, + want: 
map[string]migapi.OwnerType{}, + }, + { + name: "pod with no owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.Unknown, + }, + }, + { + name: "pod with stateful set owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "StatefulSet", + APIVersion: "apps/v1", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.StatefulSet, + }, + }, + { + name: "pod with deployment through replicaset owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + APIVersion: "apps/v1", + Name: "rs-0", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{ + Client: fake.NewFakeClient(&appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rs-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + APIVersion: "apps/v1", + }, + }, + }, + }), + }, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.Deployment, + }, + }, + { + name: "pod with replicaset owner, replicaset not found, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + APIVersion: "apps/v1", + Name: "rs-0", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + 
}, + }, + }, + migPlan: ReconcileMigPlan{ + Client: fake.NewFakeClient(), + }, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.ReplicaSet, + }, + }, + { + name: "pod with replicaset owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + APIVersion: "apps/v1", + Name: "rs-0", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{ + Client: fake.NewFakeClient(&appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rs-0", + Namespace: "default", + }, + }), + }, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.ReplicaSet, + }, + }, + { + name: "pod with deployment owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + APIVersion: "apps/v1", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.Deployment, + }, + }, + { + name: "pod with daemonset owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "DaemonSet", + APIVersion: "apps/v1", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.DaemonSet, + }, + }, + { + name: "pod with job owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Job", + 
APIVersion: "batch/v1", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.Job, + }, + }, + { + name: "pod with cron job owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "CronJob", + APIVersion: "batch/v1", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.CronJob, + }, + }, + { + name: "pod with VMI owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "VirtualMachineInstance", + APIVersion: "kubevirt.io/v1", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.VirtualMachine, + }, + }, + { + name: "pod with VMI owner, and multiple pvcs", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "VirtualMachineInstance", + APIVersion: "kubevirt.io/v1", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0", "pvc-1", "pvc-2"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.VirtualMachine, + "pvc-1": migapi.VirtualMachine, + "pvc-2": migapi.VirtualMachine, + }, + }, + { + name: "hotplug pod with pod owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hp-test", + Namespace: "default", + OwnerReferences: 
[]metav1.OwnerReference{ + { + Kind: "Pod", + APIVersion: "", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.VirtualMachine, + }, + }, + { + name: "pod with unknown owner, and single pvc", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hp-test", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Something", + APIVersion: "unknown", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.Unknown, + }, + }, + { + name: "single pvc, owned by multiple pods with different types", + args: args{ + podList: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "VirtualMachineInstance", + APIVersion: "kubevirt.io/v1", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "DaemonSet", + APIVersion: "apps/v1", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: createPodVolumes([]string{"pvc-0"}), + }, + }, + }, + migPlan: ReconcileMigPlan{}, + }, + want: map[string]migapi.OwnerType{ + "pvc-0": migapi.Unknown, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.args.migPlan.createPVCToOwnerTypeMap(tt.args.podList) + if (err != nil) != tt.wantErr { + t.Errorf("createPVCToOwnerTypeMap() error = %v, wantErr %v", err, tt.wantErr) + return + } + for k, v := range got { + if tt.want[k] != v { + t.Errorf("createPVCToOwnerTypeMap() = %v, want %v", got, tt.want) + } + } + }) + } +} + +func createPodVolumes(pvcNames []string) 
[]corev1.Volume { + volumes := make([]corev1.Volume, 0, len(pvcNames)) + for _, pvcName := range pvcNames { + volumes = append(volumes, corev1.Volume{ + Name: pvcName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + }) + } + return volumes +} diff --git a/pkg/pods/template.go b/pkg/pods/template.go index 6d10467a0..63dfecd7b 100644 --- a/pkg/pods/template.go +++ b/pkg/pods/template.go @@ -10,6 +10,7 @@ import ( batchv1beta "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + virtv1 "kubevirt.io/api/core/v1" k8sclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -66,6 +67,12 @@ func ListTemplatePods(client compat.Client, namespaces []string) ([]corev1.Pod, } pods = append(pods, newPods...) + newPods, err = listVirtualMachineTemplatePodsForNamespace(client, ns) + if err != nil { + return nil, err + } + pods = append(pods, newPods...) + } return pods, nil } @@ -83,6 +90,13 @@ func listDeploymentTemplatePodsForNamespace(client k8sclient.Client, ns string) ObjectMeta: metav1.ObjectMeta{ Name: deployment.GetName(), Namespace: deployment.GetNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: deployment.GetName(), + }, + }, }, Spec: podTemplate.Spec, } @@ -107,6 +121,13 @@ func listDeploymentConfigTemplatePodsForNamespace(client k8sclient.Client, ns st ObjectMeta: metav1.ObjectMeta{ Name: deploymentConfig.GetName(), Namespace: deploymentConfig.GetNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "DeploymentConfig", + Name: deploymentConfig.GetName(), + }, + }, }, Spec: podTemplate.Spec, } @@ -131,6 +152,13 @@ func listReplicationControllerTemplatePodsForNamespace(client k8sclient.Client, ObjectMeta: metav1.ObjectMeta{ Name: replicationController.GetName(), Namespace: replicationController.GetNamespace(), + OwnerReferences: 
[]metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "DeploymentConfig", + Name: replicationController.GetName(), + }, + }, }, Spec: podTemplate.Spec, } @@ -152,6 +180,13 @@ func listDaemonSetTemplatePodsForNamespace(client k8sclient.Client, ns string) ( ObjectMeta: metav1.ObjectMeta{ Name: daemonSet.GetName(), Namespace: daemonSet.GetNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: daemonSet.GetName(), + }, + }, }, Spec: podTemplate.Spec, } @@ -173,6 +208,13 @@ func listStatefulSetTemplatePodsForNamespace(client k8sclient.Client, ns string) ObjectMeta: metav1.ObjectMeta{ Name: statefulSet.GetName(), Namespace: statefulSet.GetNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "StatefulSet", + Name: statefulSet.GetName(), + }, + }, }, Spec: podTemplate.Spec, } @@ -194,6 +236,13 @@ func listReplicaSetTemplatePodsForNamespace(client k8sclient.Client, ns string) ObjectMeta: metav1.ObjectMeta{ Name: replicaSet.GetName(), Namespace: replicaSet.GetNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "ReplicaSet", + Name: replicaSet.GetName(), + }, + }, }, Spec: podTemplate.Spec, } @@ -215,6 +264,13 @@ func listJobTemplatePodsForNamespace(client k8sclient.Client, ns string) ([]core ObjectMeta: metav1.ObjectMeta{ Name: job.GetName(), Namespace: job.GetNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "Job", + Name: job.GetName(), + }, + }, }, Spec: podTemplate.Spec, } @@ -236,6 +292,13 @@ func listCronJobTemplatePodsForNamespace(client k8sclient.Client, ns string) ([] ObjectMeta: metav1.ObjectMeta{ Name: cronJob.GetName(), Namespace: cronJob.GetNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "CronJob", + Name: cronJob.GetName(), + }, + }, }, Spec: podTemplate.Spec, } @@ -243,3 +306,54 @@ func 
listCronJobTemplatePodsForNamespace(client k8sclient.Client, ns string) ([] } return pods, nil } + +func listVirtualMachineTemplatePodsForNamespace(client k8sclient.Client, ns string) ([]corev1.Pod, error) { + pods := []corev1.Pod{} + list := virtv1.VirtualMachineList{} + err := client.List(context.TODO(), &list, k8sclient.InNamespace(ns)) + if err != nil { + return nil, err + } + for _, vm := range list.Items { + vmi := vm.Spec.Template + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: vm.GetName(), + Namespace: vm.GetNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "kubevirt.io/v1", + Kind: "VirtualMachineInstance", + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{}, + }, + } + for _, vmVolume := range vmi.Spec.Volumes { + if vmVolume.PersistentVolumeClaim == nil && vmVolume.DataVolume == nil { + continue + } + claimName := "" + if vmVolume.PersistentVolumeClaim != nil { + claimName = vmVolume.PersistentVolumeClaim.ClaimName + } else if vmVolume.DataVolume != nil { + claimName = vmVolume.DataVolume.Name + } else { + continue + } + volume := corev1.Volume{ + Name: vmVolume.Name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: claimName, + }, + }, + } + pod.Spec.Volumes = append(pod.Spec.Volumes, volume) + } + pods = append(pods, pod) + } + return pods, nil +}