diff --git a/api/server/handlers/cluster/delete_node_group.go b/api/server/handlers/cluster/delete_node_group.go
new file mode 100644
index 0000000000..ab40d87a2e
--- /dev/null
+++ b/api/server/handlers/cluster/delete_node_group.go
@@ -0,0 +1,96 @@
+package cluster
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+// DeleteNodeGroupHandler is the handler for the /delete-node-group endpoint
+type DeleteNodeGroupHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewDeleteNodeGroupHandler returns a handler for deleting node groups
+func NewDeleteNodeGroupHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *DeleteNodeGroupHandler {
+	return &DeleteNodeGroupHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// DeleteNodeGroupRequest represents the request to delete a node group
+type DeleteNodeGroupRequest struct {
+	// NodeGroupId is the id of the node group to delete
+	NodeGroupId string `json:"node_group_id"`
+}
+
+// DeleteNodeGroupResponse represents the response from deleting a node group
+type DeleteNodeGroupResponse struct {
+	// ContractRevisionId is the id of the contract revision created by the deletion
+	ContractRevisionId string `json:"contract_revision_id"`
+}
+
+// ServeHTTP handles POST requests to delete a node group
+func (c *DeleteNodeGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-delete-node-group")
+	defer span.End()
+
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+
+	request := &DeleteNodeGroupRequest{}
+
+	ok := c.DecodeAndValidate(w, r, request)
+	if !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding delete node group request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if request.NodeGroupId == "" {
+		err := telemetry.Error(ctx, span, nil, "node group id is empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	userNodeGroupReq := connect.NewRequest(&porterv1.DeleteUserNodeGroupRequest{
+		ProjectId:       int64(project.ID),
+		ClusterId:       int64(cluster.ID),
+		UserNodeGroupId: request.NodeGroupId,
+	})
+
+	ccpResp, err := c.Config().ClusterControlPlaneClient.DeleteUserNodeGroup(ctx, userNodeGroupReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error deleting user node group")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp == nil || ccpResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "ccp resp msg is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	resp := &DeleteNodeGroupResponse{
+		ContractRevisionId: ccpResp.Msg.ContractRevisionId,
+	}
+
+	c.WriteResult(w, r, resp)
+}
diff --git a/api/server/handlers/cluster/node_groups.go b/api/server/handlers/cluster/node_groups.go
new file mode 100644
index 0000000000..65c7bcc165
--- /dev/null
+++ b/api/server/handlers/cluster/node_groups.go
@@ -0,0 +1,94 @@
+package cluster
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+// NodeGroupsHandler is the handler for the /node-groups endpoint
+type NodeGroupsHandler struct {
+	handlers.PorterHandlerWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewNodeGroupsHandler returns a handler for listing node groups
+func NewNodeGroupsHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) *NodeGroupsHandler {
+	return &NodeGroupsHandler{
+		PorterHandlerWriter:   handlers.NewDefaultPorterHandler(config, nil, writer),
+		KubernetesAgentGetter: authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// NodeGroupsResponse represents the response to a list node groups request
+type NodeGroupsResponse struct {
+	NodeGroups []NodeGroup `json:"node_groups"`
+}
+
+// NodeGroup represents a node group managed by a user
+type NodeGroup struct {
+	Name         string  `json:"name"`
+	Id           string  `json:"id"`
+	InstanceType string  `json:"instance_type"`
+	RamMb        int32   `json:"ram_mb"`
+	CpuCores     float32 `json:"cpu_cores"`
+	GpuCores     int32   `json:"gpu_cores"`
+}
+
+// ServeHTTP handles GET requests to list node groups
+func (c *NodeGroupsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-node-groups")
+	defer span.End()
+
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+
+	userNodeGroupReq := connect.NewRequest(&porterv1.UserNodeGroupsRequest{
+		ProjectId: int64(project.ID),
+		ClusterId: int64(cluster.ID),
+	})
+
+	ccpResp, err := c.Config().ClusterControlPlaneClient.UserNodeGroups(ctx, userNodeGroupReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error listing user node groups")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp == nil || ccpResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "ccp resp msg is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	var nodeGroups []NodeGroup
+	for _, ng := range ccpResp.Msg.UserNodeGroups {
+		nodeGroups = append(nodeGroups, NodeGroup{
+			Name:         ng.Name,
+			Id:           ng.Id,
+			InstanceType: ng.InstanceType,
+			RamMb:        ng.RamMb,
+			CpuCores:     ng.CpuCores,
+			GpuCores:     ng.GpuCores,
+		})
+	}
+
+	res := &NodeGroupsResponse{
+		NodeGroups: nodeGroups,
+	}
+
+	c.WriteResult(w, r, res)
+}
diff --git a/api/server/router/cluster.go b/api/server/router/cluster.go
index 7bd50f7490..3103a6e9bd 100644
--- a/api/server/router/cluster.go
+++ b/api/server/router/cluster.go
@@ -890,6 +890,63 @@ func getClusterRoutes(
 		Router:   r,
 	})
 
+	// GET /api/projects/{project_id}/clusters/{cluster_id}/node-groups -> cluster.NewNodeGroupsHandler
+	nodeGroupsEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbGet,
+			Method: types.HTTPVerbGet,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: relPath + "/node-groups",
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	nodeGroupsHandler := cluster.NewNodeGroupsHandler(
+		config,
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: nodeGroupsEndpoint,
+		Handler:  nodeGroupsHandler,
+		Router:   r,
+	})
+
+	// POST /api/projects/{project_id}/clusters/{cluster_id}/delete-node-group -> cluster.NewDeleteNodeGroupHandler
+	deleteNodeGroupEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbUpdate,
+			Method: types.HTTPVerbPost,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: relPath + "/delete-node-group",
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	deleteNodeGroupHandler := cluster.NewDeleteNodeGroupHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: deleteNodeGroupEndpoint,
+		Handler:  deleteNodeGroupHandler,
+		Router:   r,
+	})
+
 	// GET /api/projects/{project_id}/clusters/{cluster_id}/nodes/{node_name} -> cluster.NewGetNodeHandler
 	getNodeEndpoint := factory.NewAPIEndpoint(
 		&types.APIRequestMetadata{
diff --git a/dashboard/package-lock.json b/dashboard/package-lock.json
index 4f69dd136e..10f677dc8c 100644
--- a/dashboard/package-lock.json
+++ b/dashboard/package-lock.json
@@ -100,7 +100,7 @@
         "@babel/preset-typescript": "^7.15.0",
         "@ianvs/prettier-plugin-sort-imports": "^4.1.1",
         "@pmmmwh/react-refresh-webpack-plugin": "^0.4.3",
-        "@porter-dev/api-contracts": "^0.2.164",
+        "@porter-dev/api-contracts": "^0.2.167",
         "@testing-library/jest-dom": "^4.2.4",
         "@testing-library/react": "^9.3.2",
         "@testing-library/user-event": "^7.1.2",
@@ -2786,9 +2786,9 @@
       }
     },
     "node_modules/@porter-dev/api-contracts": {
-      "version": "0.2.164",
-      "resolved": "https://registry.npmjs.org/@porter-dev/api-contracts/-/api-contracts-0.2.164.tgz",
-      "integrity": "sha512-yq3rX6YVbTFCTh4p1UdXNSUHsSD/ED0M5JdhaRee9PRBAJ5wsNg2FUXN4zYcOM3e5qqmgRwF25RFc9jm4wKthQ==",
+      "version": "0.2.155",
+      "resolved": "https://registry.npmjs.org/@porter-dev/api-contracts/-/api-contracts-0.2.155.tgz",
+      "integrity": "sha512-Tar/IsKoUSmz8Q8Fw9ozflrAI+yAGzOIdx5WmZ5iCSCkvudSLnDp7xQ0po/traPzYLUldZjaNsw0KKXnOb1myQ==",
       "dev": true,
       "dependencies": {
         "@bufbuild/protobuf": "^1.1.0"
diff --git a/dashboard/package.json b/dashboard/package.json
index 4be9258892..123dbc5399 100644
--- a/dashboard/package.json
+++ b/dashboard/package.json
@@ -107,7 +107,7 @@
     "@babel/preset-typescript": "^7.15.0",
     "@ianvs/prettier-plugin-sort-imports": "^4.1.1",
     "@pmmmwh/react-refresh-webpack-plugin": "^0.4.3",
-    "@porter-dev/api-contracts": "^0.2.164",
+    "@porter-dev/api-contracts": "^0.2.167",
     "@testing-library/jest-dom": "^4.2.4",
     "@testing-library/react": "^9.3.2",
     "@testing-library/user-event": "^7.1.2",
diff --git a/dashboard/src/components/porter/TrashDelete.tsx b/dashboard/src/components/porter/TrashDelete.tsx
new file mode 100644
index 0000000000..801edc2ca7
--- /dev/null
+++ b/dashboard/src/components/porter/TrashDelete.tsx
@@ -0,0 +1,44 @@
+import React from "react";
+import styled from "styled-components";
+
+type Props = {
+  handleDelete: () => void;
+};
+
+const TrashDelete: React.FC<Props> = ({ handleDelete }) => {
+  return (
+    <ActionButton
+      onClick={(e) => {
+        e.stopPropagation();
+        handleDelete();
+      }}
+      type={"button"}
+    >
+      <span className="material-icons">delete</span>
+    </ActionButton>
+  );
+};
+
+export default TrashDelete;
+
+const ActionButton = styled.button`
+  position: relative;
+  border: none;
+  background: none;
+  color: white;
+  padding: 5px;
+  display: flex;
+  justify-content: center;
+  align-items: center;
+  border-radius: 50%;
+  cursor: pointer;
+  color: #aaaabb;
+  :hover {
+    color: white;
+  }
+
+  > span {
+    font-size: 20px;
+  }
+  margin-right: 5px;
+`;
diff --git a/dashboard/src/lib/clusters/index.ts b/dashboard/src/lib/clusters/index.ts
index f8e98ecd3e..f743b98d35 100644
--- a/dashboard/src/lib/clusters/index.ts
+++ b/dashboard/src/lib/clusters/index.ts
@@ -42,6 +42,7 @@ export function updateExistingClusterContract(
       clientClusterContract.cluster.cloudProviderCredentialsId,
     projectId: clientClusterContract.cluster.projectId,
   });
+  console.log(clientClusterContract.cluster.config);
   match(clientClusterContract.cluster.config)
     .with({ kind: "EKS" }, (config) => {
       if (cluster.kindValues.case !== "eksKind") {
@@ -71,6 +72,8 @@ export function updateExistingClusterContract(
       );
     });
 
+  console.log(cluster);
+
   return cluster;
 }
 
@@ -93,7 +96,10 @@ function clientEKSConfigToProto(
           .with("MONITORING", () => NodeGroupType.MONITORING)
           .with("APPLICATION", () => NodeGroupType.APPLICATION)
           .with("CUSTOM", () => NodeGroupType.CUSTOM)
+          .with("USER", () => NodeGroupType.USER)
           .otherwise(() => NodeGroupType.UNSPECIFIED),
+        nodeGroupName: ng.nodeGroupName,
+        nodeGroupId: ng.nodeGroupId,
       });
     }),
     network: new AWSClusterNetwork({
@@ -159,7 +165,10 @@ function clientGKEConfigToProto(
            () => GKENodePoolType.GKE_NODE_POOL_TYPE_APPLICATION
          )
          .with("CUSTOM", () => GKENodePoolType.GKE_NODE_POOL_TYPE_CUSTOM)
+          .with("USER", () => GKENodePoolType.GKE_NODE_POOL_TYPE_CUSTOM)
          .otherwise(() => GKENodePoolType.GKE_NODE_POOL_TYPE_UNSPECIFIED),
+        nodePoolName: ng.nodeGroupName,
+        nodePoolId: ng.nodeGroupId,
       });
     }),
     network: new GKENetwork({
@@ -189,7 +198,10 @@ function clientAKSConfigToProto(
          .with("MONITORING", () => NodePoolType.MONITORING)
          .with("APPLICATION", () => NodePoolType.APPLICATION)
          .with("CUSTOM", () => NodePoolType.CUSTOM)
+          .with("USER", () => NodePoolType.USER)
          .otherwise(() => NodePoolType.UNSPECIFIED),
+        nodePoolName: ng.nodeGroupName,
+        nodePoolId: ng.nodeGroupId,
       });
     }),
     skuTier: match(clientConfig.skuTier)
@@ -249,7 +261,10 @@ const clientEKSConfigFromProto = (value: EKS): EKSClientClusterConfig => {
            .with(NodeGroupType.MONITORING, () => "MONITORING" as const)
            .with(NodeGroupType.APPLICATION, () => "APPLICATION" as const)
            .with(NodeGroupType.CUSTOM, () => "CUSTOM" as const)
+            .with(NodeGroupType.USER, () => "USER" as const)
            .otherwise(() => "UNKNOWN" as const),
+          nodeGroupName: ng.nodeGroupName,
+          nodeGroupId: ng.nodeGroupId,
        };
      }),
    cidrRange: value.network?.vpcCidr ?? value.cidrRange ?? "", // network will always be provided in one of those fields
@@ -320,7 +335,10 @@ const clientGKEConfigFromProto = (value: GKE): GKEClientClusterConfig => {
              GKENodePoolType.GKE_NODE_POOL_TYPE_CUSTOM,
              () => "CUSTOM" as const
            )
+            .with(GKENodePoolType.GKE_NODE_POOL_TYPE_USER, () => "USER" as const)
            .otherwise(() => "UNKNOWN" as const),
+          nodeGroupName: ng.nodePoolName,
+          nodeGroupId: ng.nodePoolId,
        };
      }),
    cidrRange: value.network?.cidrRange ?? "", // network will always be provided
"", // network will always be provided @@ -345,7 +363,10 @@ const clientAKSConfigFromProto = (value: AKS): AKSClientClusterConfig => { .with(NodePoolType.MONITORING, () => "MONITORING" as const) .with(NodePoolType.APPLICATION, () => "APPLICATION" as const) .with(NodePoolType.CUSTOM, () => "CUSTOM" as const) + .with(NodePoolType.USER, () => "USER" as const) .otherwise(() => "UNKNOWN" as const), + nodeGroupName: ng.nodePoolName, + nodeGroupId: ng.nodePoolId, }; }), skuTier: match(value.skuTier) diff --git a/dashboard/src/lib/clusters/types.ts b/dashboard/src/lib/clusters/types.ts index cfa2c69c4d..ddadc6ee6c 100644 --- a/dashboard/src/lib/clusters/types.ts +++ b/dashboard/src/lib/clusters/types.ts @@ -328,6 +328,17 @@ export type ClientNode = { instanceType: ClientMachineType; }; +// Node Group +export const nodeGroupValidator = z.object({ + name: z.string(), + id: z.string(), + instance_type: z.string(), + ram_mb: z.number(), + cpu_cores: z.number(), + gpu_cores: z.number(), +}); +export type ClientNodeGroup = z.infer; + // Cluster export const clusterValidator = z.object({ id: z.number(), @@ -437,6 +448,7 @@ const nodeGroupTypeValidator = z.enum([ "MONITORING", "APPLICATION", "CUSTOM", + "USER", ]); export type NodeGroupType = z.infer; const eksNodeGroupValidator = z.object({ @@ -444,18 +456,27 @@ const eksNodeGroupValidator = z.object({ minInstances: z.number(), maxInstances: z.number(), nodeGroupType: nodeGroupTypeValidator, + // name is required for USER node groups + nodeGroupName: z.string().optional(), + nodeGroupId: z.string().optional(), }); const gkeNodeGroupValidator = z.object({ instanceType: z.string(), minInstances: z.number(), maxInstances: z.number(), nodeGroupType: nodeGroupTypeValidator, + // name is only required for USER node groups + nodeGroupName: z.string().optional(), + nodeGroupId: z.string().optional(), }); const aksNodeGroupValidator = z.object({ instanceType: z.string(), minInstances: z.number(), maxInstances: z.number(), nodeGroupType: nodeGroupTypeValidator, + // name is only required for USER node groups + nodeGroupName: z.string().optional(), + nodeGroupId: z.string().optional(), }); const cidrRangeValidator = z diff --git a/dashboard/src/lib/hooks/useCluster.ts b/dashboard/src/lib/hooks/useCluster.ts index 7b52c8dbb7..ab7d8e0fe9 100644 --- a/dashboard/src/lib/hooks/useCluster.ts +++ b/dashboard/src/lib/hooks/useCluster.ts @@ -24,12 +24,14 @@ import { clusterValidator, contractValidator, createContractResponseValidator, + nodeGroupValidator, nodeValidator, preflightCheckValidator, type APIContract, type ClientCluster, type ClientClusterContract, type ClientNode, + type ClientNodeGroup, type ClientPreflightCheck, type ClusterState, type ContractCondition, @@ -588,6 +590,61 @@ export const useClusterNodeList = ({ }; }; +type TUseClusterNodeGroups = { + nodeGroups: ClientNodeGroup[]; + isLoading: boolean; +}; +export const useClusterNodeGroups = ({ + clusterId, + refetchInterval = 3000, +}: { + clusterId: number | undefined; + refetchInterval?: number; +}): TUseClusterNodeGroups => { + const { currentProject } = useContext(Context); + + const { data, isLoading } = useQuery( + ["getClusterNodeList", currentProject?.id, clusterId], + async () => { + if ( + !currentProject?.id || + currentProject.id === -1 || + !clusterId || + clusterId === -1 + ) { + return; + } + + const res = await api.getNodeGroups( + "", + {}, + { project_id: currentProject.id, cluster_id: clusterId } + ); + + const parsed = await z + .object({ + node_groups: 
+        })
+        .parseAsync(res.data);
+
+      return parsed;
+    },
+    {
+      refetchInterval,
+      enabled:
+        !!currentProject &&
+        currentProject.id !== -1 &&
+        !!clusterId &&
+        clusterId !== -1,
+    }
+  );
+
+  return {
+    nodeGroups: data?.node_groups ?? [],
+    isLoading,
+  };
+};
+
 export const uniqueCidrMetadataValidator = z.object({
   "overlapping-service-cidr": z.string(),
   "overlapping-vpc-cidr": z.string(),
diff --git a/dashboard/src/lib/porter-apps/services.ts b/dashboard/src/lib/porter-apps/services.ts
index 418987b9e3..209ed494b2 100644
--- a/dashboard/src/lib/porter-apps/services.ts
+++ b/dashboard/src/lib/porter-apps/services.ts
@@ -120,6 +120,9 @@ export const serviceValidator = z.object({
   smartOptimization: serviceBooleanValidator.optional(),
   terminationGracePeriodSeconds: serviceNumberValidator.optional(),
   sleep: serviceBooleanValidator.optional(),
+  computeResources: z.object({
+    id: z.string(),
+  }).array(),
   config: z.discriminatedUnion("type", [
     webConfigValidator,
     workerConfigValidator,
@@ -160,6 +163,9 @@ export type SerializedService = {
   };
   terminationGracePeriodSeconds?: number;
   sleep?: boolean;
+  computeResources?: Array<{
+    id: string;
+  }>;
   config:
     | {
         type: "web";
@@ -342,6 +348,7 @@ export function serializeService(service: ClientService): SerializedService {
     },
     terminationGracePeriodSeconds: service.terminationGracePeriodSeconds?.value,
     sleep: service.sleep?.value,
+    computeResources: service.computeResources,
     config: match(service.config)
       .with({ type: "web" }, (config) =>
         Object.freeze({
@@ -420,6 +427,7 @@ export function deserializeService({
     port: ServiceField.number(service.port, override?.port),
     cpuCores: ServiceField.number(service.cpuCores, override?.cpuCores),
     sleep: ServiceField.boolean(service.sleep, override?.sleep),
+    computeResources: service.computeResources ?? [],
     gpu: {
       enabled: ServiceField.boolean(
         service.gpu?.enabled,
@@ -642,6 +650,9 @@ export function serviceProto(service: SerializedService): Service {
           instancesOptional: service.instances,
           type: serviceTypeEnumProto(config.type),
           sleep: service.sleep,
+          computeResources: {
+            computeResources: service.computeResources,
+          },
           config: {
             value: {
               ...config,
@@ -659,6 +670,9 @@ export function serviceProto(service: SerializedService): Service {
           instancesOptional: service.instances,
           type: serviceTypeEnumProto(config.type),
           sleep: service.sleep,
+          computeResources: {
+            computeResources: service.computeResources,
+          },
           config: {
             value: {
               ...config,
@@ -675,6 +689,9 @@ export function serviceProto(service: SerializedService): Service {
           runOptional: service.run,
           instancesOptional: service.instances,
           type: serviceTypeEnumProto(config.type),
+          computeResources: {
+            computeResources: service.computeResources,
+          },
           config: {
             value: {
               ...config,
@@ -693,6 +710,9 @@ export function serviceProto(service: SerializedService): Service {
          runOptional: service.run,
          instancesOptional: service.instances,
          type: serviceTypeEnumProto(config.type),
+          computeResources: {
+            computeResources: service.computeResources,
+          },
          config: {
            value: {},
            case: "jobConfig",
@@ -706,6 +726,9 @@ export function serviceProto(service: SerializedService): Service {
          ...service,
          runOptional: service.run,
          instancesOptional: service.instances,
+          computeResources: {
+            computeResources: service.computeResources,
+          },
          type: serviceTypeEnumProto(config.type),
          config: {
            value: {},
@@ -738,6 +761,7 @@ export function serializedServiceFromProto({
         run: service.runOptional ?? service.run,
         instances: service.instancesOptional ??
service.instances, sleep: service.sleep, + computeResources: service.computeResources?.computeResources, config: { type: "web" as const, autoscaling: value.autoscaling ? value.autoscaling : undefined, @@ -751,6 +775,7 @@ export function serializedServiceFromProto({ run: service.runOptional ?? service.run, instances: service.instancesOptional ?? service.instances, sleep: service.sleep, + computeResources: service.computeResources?.computeResources, config: { type: "worker" as const, autoscaling: value.autoscaling ? value.autoscaling : undefined, @@ -763,6 +788,7 @@ export function serializedServiceFromProto({ ? { ...service, run: service.runOptional ?? service.run, + computeResources: service.computeResources?.computeResources, instances: service.instancesOptional ?? service.instances, config: { type: "predeploy" as const, @@ -772,6 +798,7 @@ export function serializedServiceFromProto({ ? { ...service, run: service.runOptional ?? service.run, + computeResources: service.computeResources?.computeResources, instances: service.instancesOptional ?? service.instances, config: { type: "initdeploy" as const, @@ -780,6 +807,7 @@ export function serializedServiceFromProto({ : { ...service, run: service.runOptional ?? service.run, + computeResources: service.computeResources?.computeResources, instances: service.instancesOptional ?? service.instances, config: { type: "job" as const, diff --git a/dashboard/src/main/home/app-dashboard/validate-apply/services-settings/tabs/Resources.tsx b/dashboard/src/main/home/app-dashboard/validate-apply/services-settings/tabs/Resources.tsx index 3d2eb2ae3a..6c60edca59 100644 --- a/dashboard/src/main/home/app-dashboard/validate-apply/services-settings/tabs/Resources.tsx +++ b/dashboard/src/main/home/app-dashboard/validate-apply/services-settings/tabs/Resources.tsx @@ -1,13 +1,15 @@ -import React, { useContext, useMemo } from "react"; +import React, { useContext, useEffect, useMemo } from "react"; import { Controller, useFormContext } from "react-hook-form"; import { match } from "ts-pattern"; import Checkbox from "components/porter/Checkbox"; import { ControlledInput } from "components/porter/ControlledInput"; import InputSlider from "components/porter/InputSlider"; +import Selector from "components/porter/Selector"; import Spacer from "components/porter/Spacer"; import Text from "components/porter/Text"; import { useClusterContext } from "main/home/infrastructure-dashboard/ClusterContextProvider"; +import { type ClientNodeGroup } from "lib/clusters/types"; import { type PorterAppFormData } from "lib/porter-apps"; import { getServiceResourceAllowances, @@ -30,13 +32,16 @@ const Resources: React.FC = ({ service, lifecycleJobType, }) => { - const { control, register, watch } = useFormContext(); + const { control, register, watch, setValue } = + useFormContext(); const { currentProject } = useContext(Context); - const { nodes } = useClusterContext(); + const { nodes, userNodeGroups } = useClusterContext(); const { maxRamMegabytes, maxCpuCores } = useMemo(() => { return getServiceResourceAllowances(nodes, currentProject?.sandbox_enabled); }, [nodes]); + const computeResources = watch(`app.services.${index}.computeResources`); + const autoscalingEnabled = watch( `app.services.${index}.config.autoscaling.enabled`, { @@ -59,8 +64,68 @@ const Resources: React.FC = ({ : defaultMessage; }; + const nodeGroupsWithDeletedNodes = userNodeGroups.concat( + computeResources.map((cr) => { + return { + id: cr.id, + name: "[deleted]", + instance_type: "", + cpu_cores: 0, + ram_mb: 0, + 
gpu_cores: 0, + }; + }) + ); + + const computeResource = + (computeResources.length > 0 && + nodeGroupsWithDeletedNodes.find( + (ng) => ng.id === computeResources[0].id + )) || + null; + + const cpuCoreLimit = computeResource + ? computeResource.cpu_cores * 0.75 + : maxCpuCores; + const ramMbLimit = computeResource + ? computeResource.ram_mb * 0.75 + : maxRamMegabytes; + + useEffect(() => { + if (!computeResource && computeResources.length > 0) { + setValue(`app.services.${index}.computeResources`, []); + } + }, [computeResource]); + return ( <> + + + activeValue={computeResource?.id || "default"} + width="300px" + options={nodeGroupsWithDeletedNodes + .map((ng: ClientNodeGroup) => { + return { + value: ng.id, + label: ng.name, + key: ng.id, + }; + }) + .concat([{ value: "default", label: "Default", key: "default" }])} + setActiveValue={(value: string) => { + if (value === "default") { + setValue(`app.services.${index}.computeResources`, []); + return; + } + + setValue(`app.services.${index}.computeResources`, [ + { + id: value, + }, + ]); + }} + label={"Node Group"} + /> = ({ label="CPUs: " unit="Cores" min={0.1} - max={maxCpuCores} + max={cpuCoreLimit} color={"#3f51b5"} value={value.value.toString()} setValue={(e) => { @@ -111,7 +176,7 @@ const Resources: React.FC = ({ label="RAM: " unit="MB" min={10} - max={maxRamMegabytes} + max={ramMbLimit} color={"#3f51b5"} value={value.value.toString()} setValue={(e) => { diff --git a/dashboard/src/main/home/infrastructure-dashboard/ClusterContextProvider.tsx b/dashboard/src/main/home/infrastructure-dashboard/ClusterContextProvider.tsx index 7295efe83f..b666796f4f 100644 --- a/dashboard/src/main/home/infrastructure-dashboard/ClusterContextProvider.tsx +++ b/dashboard/src/main/home/infrastructure-dashboard/ClusterContextProvider.tsx @@ -15,8 +15,13 @@ import { type ClientCluster, type ClientClusterContract, type ClientNode, + type ClientNodeGroup, } from "lib/clusters/types"; -import { useCluster, useClusterNodeList } from "lib/hooks/useCluster"; +import { + useCluster, + useClusterNodeGroups, + useClusterNodeList, +} from "lib/hooks/useCluster"; import { useClusterAnalytics } from "lib/hooks/useClusterAnalytics"; import api from "shared/api"; @@ -26,11 +31,13 @@ import notFound from "assets/not-found.png"; type ClusterContextType = { cluster: ClientCluster; nodes: ClientNode[]; + userNodeGroups: ClientNodeGroup[]; projectId: number; isClusterUpdating: boolean; updateClusterVanityName: (name: string) => void; updateCluster: (clientContract: ClientClusterContract) => Promise; deleteCluster: () => Promise; + deleteNodeGroup: (nodeGroupId: string) => Promise; }; const ClusterContext = createContext(null); @@ -64,6 +71,10 @@ const ClusterContextProvider: React.FC = ({ const { reportToAnalytics } = useClusterAnalytics(); const { nodes } = useClusterNodeList({ clusterId, refetchInterval }); + const { nodeGroups: userNodeGroups } = useClusterNodeGroups({ + clusterId, + refetchInterval, + }); const paramsExist = !!clusterId && !!currentProject && currentProject.id !== -1; @@ -152,6 +163,26 @@ const ClusterContextProvider: React.FC = ({ return cluster?.contract?.condition === "" ?? 
false; }, [cluster?.contract?.condition]); + const deleteNodeGroup = useCallback( + async (nodeGroupId: string) => { + if (!paramsExist) { + return; + } + + await api.deleteNodeGroup( + "; } @@ -191,11 +222,13 @@ const ClusterContextProvider: React.FC = ({ value={{ cluster, nodes, + userNodeGroups, projectId: currentProject.id, isClusterUpdating, updateClusterVanityName, updateCluster, deleteCluster, + deleteNodeGroup, }} > {children} diff --git a/dashboard/src/main/home/infrastructure-dashboard/shared/NodeGroups.tsx b/dashboard/src/main/home/infrastructure-dashboard/shared/NodeGroups.tsx index b774c41b32..2180f7ffe0 100644 --- a/dashboard/src/main/home/infrastructure-dashboard/shared/NodeGroups.tsx +++ b/dashboard/src/main/home/infrastructure-dashboard/shared/NodeGroups.tsx @@ -1,4 +1,4 @@ -import React, { useContext, useMemo } from "react"; +import React, {useContext, useMemo, useState} from "react"; import _ from "lodash"; import { Controller, useFieldArray, useFormContext } from "react-hook-form"; import styled from "styled-components"; @@ -12,7 +12,7 @@ import PorterOperatorComponent from "components/porter/PorterOperatorComponent"; import Select from "components/porter/Select"; import Spacer from "components/porter/Spacer"; import Text from "components/porter/Text"; -import { +import { type ClientClusterContract, type ClientMachineType, } from "lib/clusters/types"; @@ -20,6 +20,10 @@ import { import { Context } from "shared/Context"; import chip from "assets/computer-chip.svg"; import world from "assets/world.svg"; +import {useClusterContext} from "../ClusterContextProvider"; +import Modal from "../../../../components/porter/Modal"; +import TrashDelete from "../../../../components/porter/TrashDelete"; +import axios from "axios"; type Props = { availableMachineTypes: ClientMachineType[]; @@ -32,7 +36,11 @@ const NodeGroups: React.FC = ({ isCreating = false, }) => { const { control } = useFormContext(); - const { currentProject } = useContext(Context); + const { currentProject, currentCluster } = useContext(Context); + const { deleteNodeGroup } = useClusterContext(); + const [ nodeGroupDeletionId, setNodeGroupDeletionId ] = useState(""); + const [ nodeGroupDeletionConfirmation, setNodeGroupDeletionConfirmation ] = useState(""); + const [ nodeGroupDeletionError, setNodeGroupDeletionError ] = useState(""); const { fields: nodeGroups, append, @@ -41,6 +49,17 @@ const NodeGroups: React.FC = ({ control, name: "cluster.config.nodeGroups", }); + + const onRemove = (index: number): void => { + const id = nodeGroups[index].nodeGroupId; + + if (id) { + setNodeGroupDeletionId(id); + } else { + remove(index); + } + }; + const displayableNodeGroups = useMemo(() => { const dng = _.groupBy( nodeGroups.map((ng, idx) => { @@ -54,6 +73,8 @@ const NodeGroups: React.FC = ({ return dng; }, [nodeGroups]); + const nodeGroupDeletionName = nodeGroups.find((ng) => ng.nodeGroupId === nodeGroupDeletionId)?.nodeGroupName || ""; + return ( {displayableNodeGroups.APPLICATION?.map((ng) => { @@ -180,6 +201,142 @@ const NodeGroups: React.FC = ({ ); })} + {displayableNodeGroups.USER?.map((ng) => { + return ( + + + + + {ng.nodeGroup.nodeGroupName} + {ng.nodeGroup.nodeGroupId && + <> + + + (id: {ng.nodeGroup.nodeGroupId}) + + + } + + { + onRemove(ng.idx); + }}/> + + } + > + ( + <> + { + onChange({ + ...value, + minInstances: parseInt(newMinInstances), + }); + }} + placeholder="ex: 1" + /> + + + Maximum number of application nodes + + + { + onChange({ + ...value, + maxInstances: parseInt(newMaxInstances), + }); + }} + 
placeholder="ex: 10" + /> + + + + ) : ( + <> + + + Minimum number of application nodes + + + { + onChange({ + ...value, + minInstances: parseInt(newMinInstances), + }); + }} + placeholder="ex: 1" + /> + + + Maximum number of application nodes + + + { + onChange({ + ...value, + maxInstances: parseInt(newMaxInstances), + }); + }} + placeholder="ex: 10" + /> + + )} + + )} + /> + + ); + })} <> {displayableNodeGroups.MONITORING?.map((ng) => { @@ -468,29 +625,123 @@ const NodeGroups: React.FC = ({ ); })} - {currentProject?.gpu_enabled && - (displayableNodeGroups.CUSTOM ?? []).length === 0 && - availableMachineTypes.filter((t) => t.isGPU).length > 0 && ( + {currentProject?.gpu_enabled && + (displayableNodeGroups.CUSTOM ?? []).length === 0 && + availableMachineTypes.filter((t) => t.isGPU).length > 0 && ( + + )} + { nodeGroupDeletionId && ( + { + setNodeGroupDeletionId(""); + }} + > + + Are you sure you want to delete this node group? + + + + + + )} + { nodeGroupDeletionId && ( + { + setNodeGroupDeletionId(""); + }} + > + + Are you sure you want to delete this node group? + + + + + + + )} ); }; diff --git a/dashboard/src/shared/api.tsx b/dashboard/src/shared/api.tsx index 3899a51360..b68604724c 100644 --- a/dashboard/src/shared/api.tsx +++ b/dashboard/src/shared/api.tsx @@ -1489,6 +1489,28 @@ const getClusterStatus = baseApi< return `/api/projects/${pathParams.project_id}/clusters/${pathParams.cluster_id}/status`; }); +const getNodeGroups = baseApi< + {}, + { + project_id: number; + cluster_id: number; + } +>("GET", (pathParams) => { + return `/api/projects/${pathParams.project_id}/clusters/${pathParams.cluster_id}/node-groups`; +}); + +const deleteNodeGroup = baseApi< + { + node_group_id: string; + }, + { + project_id: number; + cluster_id: number; + } +>("POST", (pathParams) => { + return `/api/projects/${pathParams.project_id}/clusters/${pathParams.cluster_id}/delete-node-group`; +}); + const getClusterNodes = baseApi< {}, { @@ -3832,6 +3854,8 @@ export default { getClusterNodes, getClusterNode, getClusterStatus, + getNodeGroups, + deleteNodeGroup, getConfigMap, getPRDeploymentList, getPRDeploymentByID, diff --git a/go.mod b/go.mod index 96b70c1f75..67d03ff031 100644 --- a/go.mod +++ b/go.mod @@ -89,7 +89,7 @@ require ( github.com/nats-io/nats.go v1.24.0 github.com/open-policy-agent/opa v0.44.0 github.com/ory/client-go v1.9.0 - github.com/porter-dev/api-contracts v0.2.164 + github.com/porter-dev/api-contracts v0.2.167 github.com/riandyrn/otelchi v0.5.1 github.com/santhosh-tekuri/jsonschema/v5 v5.0.1 github.com/stefanmcshane/helm v0.0.0-20221213002717-88a4a2c6e77d diff --git a/go.sum b/go.sum index 8557a216ca..96ec57d10e 100644 --- a/go.sum +++ b/go.sum @@ -1570,8 +1570,8 @@ github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= -github.com/porter-dev/api-contracts v0.2.164 h1:99Y96YH9CfAl/aPjnqXbsiEgMHUFxDM9wC5G5sQnmyQ= -github.com/porter-dev/api-contracts v0.2.164/go.mod h1:VV5BzXd02ZdbWIPLVP+PX3GKawJSGQnxorVT2sUZALU= +github.com/porter-dev/api-contracts v0.2.167 h1:uD/iME8reF/VgBoo49dmvUhZIZW7xbj2NjpFLZZWXwk= +github.com/porter-dev/api-contracts v0.2.167/go.mod h1:VV5BzXd02ZdbWIPLVP+PX3GKawJSGQnxorVT2sUZALU= github.com/porter-dev/switchboard v0.0.3 
h1:dBuYkiVLa5Ce7059d6qTe9a1C2XEORFEanhbtV92R+M= github.com/porter-dev/switchboard v0.0.3/go.mod h1:xSPzqSFMQ6OSbp42fhCi4AbGbQbsm6nRvOkrblFeXU4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
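Usage sketch (illustrative only, not part of the patch): the dashboard wrappers added in dashboard/src/shared/api.tsx above would be exercised roughly as follows. The "<token>" placeholder argument follows the existing baseApi convention in that file, and listAndDeleteNodeGroup is a hypothetical helper written for this example.

  import api from "shared/api";

  // List the user node groups for a cluster, then delete one by id.
  async function listAndDeleteNodeGroup(
    projectId: number,
    clusterId: number,
    nodeGroupId: string
  ): Promise<void> {
    // GET /api/projects/{project_id}/clusters/{cluster_id}/node-groups
    const res = await api.getNodeGroups(
      "<token>",
      {},
      { project_id: projectId, cluster_id: clusterId }
    );
    // res.data.node_groups: [{ name, id, instance_type, ram_mb, cpu_cores, gpu_cores }]
    console.log(res.data.node_groups);

    // POST /api/projects/{project_id}/clusters/{cluster_id}/delete-node-group
    // responds with { contract_revision_id } from DeleteNodeGroupHandler
    await api.deleteNodeGroup(
      "<token>",
      { node_group_id: nodeGroupId },
      { project_id: projectId, cluster_id: clusterId }
    );
  }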