Skip to content

Commit 17cbcbc

Browse files
authored
Add cluster id to create kafka (#1804)
* feat: addition of descriptions and examples to dedicated * feat: Allows the CLI to install stage env addons when terraforming customer-cloud dataplane clusters * refactor: the default api url is now set when creating a connection instead of in the registercluster cmd file * refactor: update some func and var names * refactor: add cluster-id flag to create kafka cmd to allow kafka creation on CC cluster * refactor: doc and small refactor
1 parent 81cd99c commit 17cbcbc

File tree

74 files changed

+1316
-351
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

74 files changed

+1316
-351
lines changed

docs/commands/rhoas.md

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

docs/commands/rhoas_dedicated.md

Lines changed: 7 additions & 3 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

docs/commands/rhoas_dedicated_register-cluster.md

Lines changed: 4 additions & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

docs/commands/rhoas_kafka_create.md

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

go.mod

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ require (
2525
github.com/redhat-developer/app-services-sdk-go/accountmgmt v0.3.0
2626
github.com/redhat-developer/app-services-sdk-go/connectormgmt v0.10.0
2727
github.com/redhat-developer/app-services-sdk-go/kafkainstance v0.11.0
28-
github.com/redhat-developer/app-services-sdk-go/kafkamgmt v0.17.0
28+
github.com/redhat-developer/app-services-sdk-go/kafkamgmt v0.19.0
2929
github.com/redhat-developer/app-services-sdk-go/registryinstance v0.8.2
3030
github.com/redhat-developer/app-services-sdk-go/registrymgmt v0.11.1
3131
github.com/redhat-developer/app-services-sdk-go/serviceaccountmgmt v0.9.0

go.sum

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -819,8 +819,8 @@ github.com/redhat-developer/app-services-sdk-go/connectormgmt v0.10.0 h1:CURbTHI
819819
github.com/redhat-developer/app-services-sdk-go/connectormgmt v0.10.0/go.mod h1:t3IV0eKUPgCQjoInv2l8B/NMm2OVemCxGFO/z91wsCU=
820820
github.com/redhat-developer/app-services-sdk-go/kafkainstance v0.11.0 h1:WdwVjneugUC898RSHuc2vLwlcNgPh3oF7/fuxEEGGPg=
821821
github.com/redhat-developer/app-services-sdk-go/kafkainstance v0.11.0/go.mod h1:yazwUm4IHuIWrQ0CCsqN0h7rHZx51nlFbYWKnUn7B84=
822-
github.com/redhat-developer/app-services-sdk-go/kafkamgmt v0.17.0 h1:Lp0D3pF2A1VYsbFk+KE1QQ+PfdW3HySTnIuUHrvb1iQ=
823-
github.com/redhat-developer/app-services-sdk-go/kafkamgmt v0.17.0/go.mod h1:ILvcakLEXMLZyRdO//WJZNk9fdFbnU+cM3XrBvubE64=
822+
github.com/redhat-developer/app-services-sdk-go/kafkamgmt v0.19.0 h1:RVDEeUfBgMzAK+BCnlhfHGHp2YYW6GH6jgYOv2jwYVY=
823+
github.com/redhat-developer/app-services-sdk-go/kafkamgmt v0.19.0/go.mod h1:ILvcakLEXMLZyRdO//WJZNk9fdFbnU+cM3XrBvubE64=
824824
github.com/redhat-developer/app-services-sdk-go/registryinstance v0.8.2 h1:U2je87d/DIeOaQIycg2Y7TLiESmGu0/0rQC5n64Od0Y=
825825
github.com/redhat-developer/app-services-sdk-go/registryinstance v0.8.2/go.mod h1:HkNzOWHTW/SomobQ4343+yR4oTmiyvm85BIWlsh0qbA=
826826
github.com/redhat-developer/app-services-sdk-go/registrymgmt v0.11.1 h1:VOv3wcodQ6EpKp2RRntMMTMuQSnNv1sqLezdbv18mjs=

pkg/cmd/dedicated/dedicated.go

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,16 +6,12 @@ import (
66
"github.com/spf13/cobra"
77
)
88

9-
// TO-DO add localizer and descriptions
109
func NewDedicatedCmd(f *factory.Factory) *cobra.Command {
1110
cmd := &cobra.Command{
12-
Use: "dedicated",
13-
// Short: f.Localizer.MustLocalize("kafka.topic.cmd.shortDescription"),
14-
Short: "shortDescription",
15-
// Long: f.Localizer.MustLocalize("kafka.topic.cmd.longDescription"),
16-
Long: "longDescription",
17-
// Example: f.Localizer.MustLocalize("kafka.topic.cmd.example"),
18-
Example: "example",
11+
Use: "dedicated",
12+
Short: f.Localizer.MustLocalize("dedicated.cmd.shortDescription"),
13+
Long: f.Localizer.MustLocalize("dedicated.cmd.longDescription"),
14+
Example: f.Localizer.MustLocalize("dedicated.cmd.example"),
1915
}
2016

2117
cmd.AddCommand(

pkg/cmd/dedicated/register/registercluster.go

Lines changed: 43 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@ package register
33
import (
44
"context"
55
"fmt"
6+
"github.com/redhat-developer/app-services-cli/internal/build"
7+
"github.com/redhat-developer/app-services-cli/pkg/core/config"
68
"strings"
79

810
"github.com/AlecAivazis/survey/v2"
@@ -15,12 +17,11 @@ import (
1517
)
1618

1719
type options struct {
18-
selectedClusterId string
19-
// clusterManagementApiUrl string
20-
// accessToken string
21-
clusterList []clustersmgmtv1.Cluster
22-
selectedCluster clustersmgmtv1.Cluster
23-
// clusterMachinePoolList clustersmgmtv1.MachinePoolList
20+
selectedClusterId string
21+
clusterManagementApiUrl string
22+
accessToken string
23+
clusterList []clustersmgmtv1.Cluster
24+
selectedCluster clustersmgmtv1.Cluster
2425
existingMachinePoolList []clustersmgmtv1.MachinePool
2526
selectedClusterMachinePool clustersmgmtv1.MachinePool
2627
requestedMachinePoolNodeCount int
@@ -32,18 +33,18 @@ type options struct {
3233

3334
// list of consts should come from KFM
3435
const (
35-
machinePoolId = "kafka-standard"
36-
machinePoolTaintKey = "bf2.org/kafkaInstanceProfileType"
37-
machinePoolTaintEffect = "NoExecute"
38-
machinePoolTaintValue = "standard"
39-
// machinePoolInstanceType = "m5.2xlarge"
36+
machinePoolId = "kafka-standard"
37+
machinePoolTaintKey = "bf2.org/kafkaInstanceProfileType"
38+
machinePoolTaintEffect = "NoExecute"
39+
machinePoolTaintValue = "standard"
4040
machinePoolInstanceType = "r5.xlarge"
4141
machinePoolLabelKey = "bf2.org/kafkaInstanceProfileType"
4242
machinePoolLabelValue = "standard"
4343
clusterReadyState = "ready"
4444
fleetshardAddonId = "kas-fleetshard-operator"
4545
strimziAddonId = "managed-kafka"
46-
// clusterManagementAPIURL = "https://api.openshift.com"
46+
fleetshardAddonIdQE = "kas-fleetshard-operator-qe"
47+
strimziAddonIdQE = "managed-kafka-qe"
4748
)
4849

4950
func NewRegisterClusterCommand(f *factory.Factory) *cobra.Command {
@@ -62,19 +63,16 @@ func NewRegisterClusterCommand(f *factory.Factory) *cobra.Command {
6263
},
6364
}
6465

65-
// TO-DO add flags
66-
// add a flag for clustermgmt url, i.e --cluster-management-api-url, make the flag hidden, default to api.openshift.com
67-
// supply customer mgmt access token via a flag, i.e --access-token, make the flag hidden, default to ""
6866
flags := kafkaFlagutil.NewFlagSet(cmd, f.Localizer)
69-
// flags.StringVar(&opts.clusterManagementApiUrl, "cluster-management-api-url", clusterManagementAPIURL, "cluster management api url")
70-
// flags.StringVar(&opts.accessToken, "access-token", "", "access token")
71-
// this flag will allow the user to pass the cluster id as a flag
67+
flags.StringVar(&opts.clusterManagementApiUrl, "cluster-mgmt-api-url", "", f.Localizer.MustLocalize("dedicated.registerCluster.flag.clusterMgmtApiUrl.description"))
68+
flags.StringVar(&opts.accessToken, "access-token", "", f.Localizer.MustLocalize("dedicated.registercluster.flag.accessToken.description"))
7269
flags.StringVar(&opts.selectedClusterId, "cluster-id", "", f.Localizer.MustLocalize("dedicated.registerCluster.flag.clusterId.description"))
7370

7471
return cmd
7572
}
7673

7774
func runRegisterClusterCmd(opts *options) (err error) {
75+
// Set the base URL for the cluster management API
7876
err = setListClusters(opts)
7977
if err != nil {
8078
return err
@@ -114,12 +112,11 @@ func runRegisterClusterCmd(opts *options) (err error) {
114112
}
115113

116114
func getClusterList(opts *options) (*clustersmgmtv1.ClusterList, error) {
117-
// ocm client connection
118115
conn, err := opts.f.Connection()
119116
if err != nil {
120117
return nil, err
121118
}
122-
client, cc, err := conn.API().OCMClustermgmt()
119+
client, cc, err := conn.API().OCMClustermgmt(opts.clusterManagementApiUrl, opts.accessToken)
123120
if err != nil {
124121
return nil, err
125122
}
@@ -147,9 +144,7 @@ func setListClusters(opts *options) error {
147144

148145
func validateClusters(clusters *clustersmgmtv1.ClusterList, cls []clustersmgmtv1.Cluster) []clustersmgmtv1.Cluster {
149146
for _, cluster := range clusters.Slice() {
150-
// TO-DO the cluster must be multiAZ
151147
if cluster.State() == clusterReadyState && cluster.MultiAZ() == true {
152-
// if cluster.State() == clusterReadyState {
153148
cls = append(cls, *cluster)
154149
}
155150
}
@@ -223,7 +218,7 @@ func getMachinePoolList(opts *options) (*clustersmgmtv1.MachinePoolsListResponse
223218
if err != nil {
224219
return nil, err
225220
}
226-
client, cc, err := conn.API().OCMClustermgmt()
221+
client, cc, err := conn.API().OCMClustermgmt(opts.clusterManagementApiUrl, opts.accessToken)
227222
if err != nil {
228223
return nil, err
229224
}
@@ -295,12 +290,11 @@ func createMachinePoolRequestForDedicated(machinePoolNodeCount int) (*clustersmg
295290

296291
// TO-DO this function should be moved to an ocm client / provider area
297292
func createMachinePool(opts *options, mprequest *clustersmgmtv1.MachinePool) error {
298-
// create a new machine pool via ocm
299293
conn, err := opts.f.Connection()
300294
if err != nil {
301295
return err
302296
}
303-
client, cc, err := conn.API().OCMClustermgmt()
297+
client, cc, err := conn.API().OCMClustermgmt(opts.clusterManagementApiUrl, opts.accessToken)
304298
if err != nil {
305299
return err
306300
}
@@ -384,17 +378,12 @@ func selectAccessPrivateNetworkInteractivePrompt(opts *options) error {
384378
Help: opts.f.Localizer.MustLocalize("dedicated.registerCluster.prompt.selectPublicNetworkAccess.help"),
385379
Default: false,
386380
}
387-
accessKafkasViaPublicNetwork := false
388-
err := survey.AskOne(prompt, &accessKafkasViaPublicNetwork)
381+
accessFromPublicNetwork := true
382+
err := survey.AskOne(prompt, &accessFromPublicNetwork)
389383
if err != nil {
390384
return err
391385
}
392-
if accessKafkasViaPublicNetwork {
393-
opts.accessKafkasViaPrivateNetwork = false
394-
} else {
395-
opts.accessKafkasViaPrivateNetwork = true
396-
}
397-
386+
opts.accessKafkasViaPrivateNetwork = !accessFromPublicNetwork
398387
return nil
399388
}
400389

@@ -416,7 +405,7 @@ func createAddonWithParams(opts *options, addonId string, params *[]kafkamgmtcli
416405
if err != nil {
417406
return err
418407
}
419-
client, cc, err := conn.API().OCMClustermgmt()
408+
client, cc, err := conn.API().OCMClustermgmt(opts.clusterManagementApiUrl, opts.accessToken)
420409
if err != nil {
421410
return err
422411
}
@@ -439,6 +428,20 @@ func createAddonWithParams(opts *options, addonId string, params *[]kafkamgmtcli
439428
return nil
440429
}
441430

431+
func getStrimziAddonIdByEnv(con *config.Config) string {
432+
if con.APIUrl == build.ProductionAPIURL {
433+
return strimziAddonId
434+
}
435+
return strimziAddonIdQE
436+
}
437+
438+
func getKafkaFleetShardAddonIdByEnv(con *config.Config) string {
439+
if con.APIUrl == build.ProductionAPIURL {
440+
return fleetshardAddonId
441+
}
442+
return fleetshardAddonIdQE
443+
}
444+
442445
// TO-DO go through errs and make them more user friendly with actual error messages.
443446
func registerClusterWithKasFleetManager(opts *options) error {
444447
clusterIngressDNSName, err := parseDNSURL(opts)
@@ -465,11 +468,15 @@ func registerClusterWithKasFleetManager(opts *options) error {
465468
if err != nil {
466469
return err
467470
}
468-
err = createAddonWithParams(opts, strimziAddonId, nil)
471+
con, err := opts.f.Config.Load()
472+
if err != nil {
473+
return err
474+
}
475+
err = createAddonWithParams(opts, getStrimziAddonIdByEnv(con), nil)
469476
if err != nil {
470477
return err
471478
}
472-
err = createAddonWithParams(opts, fleetshardAddonId, response.FleetshardParameters)
479+
err = createAddonWithParams(opts, getKafkaFleetShardAddonIdByEnv(con), response.FleetshardParameters)
473480
if err != nil {
474481
return err
475482
}

pkg/cmd/kafka/create/create.go

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -49,10 +49,11 @@ const (
4949
)
5050

5151
type options struct {
52-
name string
53-
provider string
54-
region string
55-
size string
52+
name string
53+
provider string
54+
region string
55+
size string
56+
clusterId string
5657

5758
marketplaceAcctId string
5859
marketplace string
@@ -143,6 +144,7 @@ func NewCreateCommand(f *factory.Factory) *cobra.Command {
143144
flags.BoolVarP(&opts.dryRun, "dry-run", "", false, f.Localizer.MustLocalize("kafka.create.flag.dryrun.description"))
144145
flags.StringVar(&opts.billingModel, FlagBillingModel, "", f.Localizer.MustLocalize("kafka.create.flag.billingModel.description"))
145146
flags.AddBypassTermsCheck(&opts.bypassChecks)
147+
flags.StringVar(&opts.clusterId, "cluster-id", "", f.Localizer.MustLocalize("kafka.create.flag.clusterId.description"))
146148

147149
_ = cmd.RegisterFlagCompletionFunc(FlagProvider, func(cmd *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
148150
return GetCloudProviderCompletionValues(f)
@@ -237,6 +239,7 @@ func runCreate(opts *options) error {
237239
Name: opts.name,
238240
Region: &opts.region,
239241
CloudProvider: &opts.provider,
242+
ClusterId: *kafkamgmtclient.NewNullableString(&opts.clusterId),
240243
}
241244

242245
if !opts.bypassChecks {

pkg/core/localize/locales/en/cmd/dedicated.en.toml

Lines changed: 32 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ one = 'The ID of the OpenShift cluster to register:'
2626
one = 'Select the ready cluster to register'
2727

2828
[dedicated.registerCluster.prompt.selectPublicNetworkAccess.message]
29-
one = 'Would you like your Kakfas to be accessible via a public network?'
29+
one = 'Would you like your Kafkas to be accessible via a public network?'
3030

3131
[dedicated.registerCluster.prompt.selectPublicNetworkAccess.help]
3232
one = 'If you select yes, your Kafka will be accessible via a public network'
@@ -42,4 +42,34 @@ There will be N/3 streaming units in your Kafka cluster, where N is the machine
4242
'''
4343

4444
[dedicated.registerCluster.info.foundValidMachinePool]
45-
one = 'Using the valid machine pool:'
45+
one = 'Using the valid machine pool:'
46+
47+
[dedicated.cmd.shortDescription]
48+
one = 'Manage your OpenShift clusters which host your kafkas.'
49+
50+
[dedicated.cmd.longDescription]
51+
one = '''
52+
Red Hat OpenShift Streams for Apache Kafka allows you to use your own OpenShift clusters to provision your
53+
kafkas. These Kafka instances will be managed by Red Hat OpenShift Streams for Apache Kafka.
54+
'''
55+
56+
[dedicated.cmd.example]
57+
one = '''
58+
# Register an OpenShift cluster with Red Hat OpenShift Streams for Apache Kafka.
59+
rhoas dedicated register-cluster
60+
'''
61+
62+
[dedicated.registerCluster.kfmResponse.status.clusterAccepted]
63+
one = '''
64+
The cluster has been accepted. Red Hat OpenShift Streams for Apache Kafka control plane is now
65+
terraforming your cluster for use with your Kafkas.
66+
'''
67+
68+
[dedicated.registerCluster.kfmResponse.status.conflict]
69+
one = 'The cluster has already been registered with Red Hat OpenShift Streams for Apache Kafka.'
70+
71+
[dedicated.registerCluster.flag.clusterMgmtApiUrl.description]
72+
one = 'The API URL of the OpenShift Cluster Management API.'
73+
74+
[dedicated.registercluster.flag.accessToken.description]
75+
one = 'The access token to use to authenticate with the OpenShift Cluster Management API.'

0 commit comments

Comments (0)