diff --git a/scripts/token-log-collector/Dockerfile b/scripts/token-log-collector/Dockerfile
new file mode 100644
index 000000000..5de63a5cb
--- /dev/null
+++ b/scripts/token-log-collector/Dockerfile
@@ -0,0 +1,21 @@
+FROM golang:1.18 as builder
+
+ENV GOOS=linux
+ENV GOARCH=amd64
+
+
+RUN apt-get update && \
+    apt-get -y install \
+    bash \
+    git \
+    make
+
+ADD . /go/src/gitlab.eng.vmware.com/shamasundara/token-log-collector
+WORKDIR /go/src/gitlab.eng.vmware.com/shamasundara/token-log-collector
+
+ENV GOPATH /go
+ENV CONVERSION_GEN_BIN token-log-collector
+
+RUN mkdir -p /build/vcloud
+RUN go build -o /build/vcloud/token-log-collector cmd/main.go
+
diff --git a/scripts/token-log-collector/Makefile b/scripts/token-log-collector/Makefile
new file mode 100644
index 000000000..fbe296c3e
--- /dev/null
+++ b/scripts/token-log-collector/Makefile
@@ -0,0 +1,12 @@
+GITROOT := $(shell git rev-parse --show-toplevel)
+TOKEN_CONTAINER := tokens_logger
+TOKEN_BINARY_DIR := $(GITROOT)/scripts/token-log-collector/bin
+TOKEN_BINARY := $(TOKEN_BINARY_DIR)/token-log-collector
+all: bin-dir generate-binary
+bin-dir:
+	mkdir -p $(TOKEN_BINARY_DIR)
+generate-binary: bin-dir
+	docker build . -t token-logger
+	docker create -ti --name $(TOKEN_CONTAINER) token-logger:latest bash
+	docker cp $(TOKEN_CONTAINER):/build/vcloud/token-log-collector $(TOKEN_BINARY)
+	docker rm $(TOKEN_CONTAINER)
\ No newline at end of file
diff --git a/scripts/token-log-collector/README.md b/scripts/token-log-collector/README.md
new file mode 100644
index 000000000..cd02d1bc1
--- /dev/null
+++ b/scripts/token-log-collector/README.md
@@ -0,0 +1,23 @@
+# token-log-collector
+
+## Generate the Linux binary
+
+To generate the token-log-collector Linux binary, execute the following command:
+
+```shell
+make generate-binary
+```
+
+The above make target stores the binary in the `./bin` directory. The binary is built for Linux (`GOOS=linux`, `GOARCH=amd64`), so it must be run on a Linux host.
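+
+As an optional sanity check (assuming the `file` utility is available on your machine), you can confirm that the produced artifact is the expected Linux build:
+
+```shell
+# run from the scripts/token-log-collector directory
+make generate-binary
+# the Dockerfile builds with GOOS=linux GOARCH=amd64, so this should report a 64-bit x86-64 ELF executable
+file bin/token-log-collector
+```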
+
+## Using the binary to collect debug logs for API token related failures
+
+Use the token-log-collector binary to create a sample API token and verify authentication to VCD (replace the angle-bracket placeholders with values for your environment):
+```shell
+token-log-collector default --vcd-host https://<vcd-fqdn> --org <org-name> --user <username> --password <password> --skip-verify --client-name cseclient
+```
+
+Use the token-log-collector binary to verify whether an API token is valid:
+```shell
+token-log-collector verify-token --vcd-host https://<vcd-fqdn> --org cluster-org --user user --skip-verify --refresh-token <refresh-token>
+```
+
+Both commands write a full trace of the VCD API requests and responses (passwords are not logged) to `token_verification.log` in the current working directory; collect that file when debugging API token failures.
\ No newline at end of file
diff --git a/scripts/token-log-collector/bin/token-log-collector b/scripts/token-log-collector/bin/token-log-collector
new file mode 100755
index 000000000..ed8040ae6
Binary files /dev/null and b/scripts/token-log-collector/bin/token-log-collector differ
diff --git a/scripts/token-log-collector/cmd/main.go b/scripts/token-log-collector/cmd/main.go
new file mode 100644
index 000000000..0b1e3161f
--- /dev/null
+++ b/scripts/token-log-collector/cmd/main.go
@@ -0,0 +1,256 @@
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"github.com/vmware/go-vcloud-director/v2/govcd"
+	"github.com/vmware/go-vcloud-director/v2/util"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+)
+
+var (
+	logFile = "token_verification.log"
+)
+
+const (
+	VCloudApiVersion = "36.0"
+)
+
+func main() {
+	var err error
+
+	defaultCmd := flag.NewFlagSet("default", flag.ExitOnError)
+	verifyCmd := flag.NewFlagSet("verify-token", flag.ExitOnError)
+
+	var vcdHost string
+	defaultCmd.StringVar(&vcdHost, "vcd-host", "", "VCD Site URL (Mandatory)")
+	verifyCmd.StringVar(&vcdHost, "vcd-host", "", "VCD Site URL (Mandatory)")
+
+	var orgName string
+	defaultCmd.StringVar(&orgName, "org", "system", "org to which the user belongs (use 'system' for the system org)")
+	verifyCmd.StringVar(&orgName, "org", "system", "org to which the user belongs (use 'system' for the system org)")
+
+	var username string
+	defaultCmd.StringVar(&username, "user", "username", "username of the user account being used")
+	verifyCmd.StringVar(&username, "user", "username", "username of the user account being used")
+
+	var password string
+	defaultCmd.StringVar(&password, "password", "password", "password of the user account being used")
+	verifyCmd.StringVar(&password, "password", "password", "password of the user account being used")
+
+	var oauthClientName string
+	defaultCmd.StringVar(&oauthClientName, "client-name", "oauthclient", "oauth client name to create the refresh token")
+	verifyCmd.StringVar(&oauthClientName, "client-name", "oauthclient", "oauth client name to create the refresh token")
+
+	var insecure bool
+	defaultCmd.BoolVar(&insecure, "skip-verify", false, "skip TLS certificate verification")
+	verifyCmd.BoolVar(&insecure, "skip-verify", false, "skip TLS certificate verification")
+
+	var refreshToken string
+	defaultCmd.StringVar(&refreshToken, "refresh-token", "", "refresh token value - used when running with verify-token sub-command")
+	verifyCmd.StringVar(&refreshToken, "refresh-token", "", "refresh token value - used when running with verify-token sub-command")
+
+	flag.Parse()
+
+	util.EnableLogging = true
+	util.ApiLogFileName = logFile
+	util.SetLog()
+	util.LogHttpRequest = true
+	util.LogHttpResponse = true
+	util.LogPasswords = false
+
+	switch os.Args[1] {
+	case "default":
+		defaultCmd.Parse(os.Args[2:])
+		if vcdHost == "" {
+			flag.Usage()
+			panic(fmt.Errorf("--vcd-host is a mandatory parameter"))
+		}
+
+		if orgName == "" {
+			flag.Usage()
+			panic(fmt.Errorf("--org is a mandatory 
parameter")) + } + + fmt.Println("creating refresh token ...") + // create refresh token + refreshToken, clientID, err := createRefreshToken(vcdHost, orgName, username, password, oauthClientName, insecure) + if err != nil { + panic(fmt.Errorf("failed to create refresh token: [%v]", err)) + } + + // verify refresh token + fmt.Println("verifying refresh token ...") + err = verifyRefreshToken(vcdHost, orgName, refreshToken, insecure) + if err != nil { + panic(fmt.Errorf("failed to verify refresh token: [%v]", err)) + } + + if username == "" { + flag.Usage() + panic(fmt.Errorf("--user is a mandatory parameter")) + } + + if oauthClientName == "" { + flag.Usage() + panic(fmt.Errorf("--client-name is a mandatory parameter")) + } + + fmt.Println("deleting the refresh token ...") + err = deleteRefreshToken(vcdHost, orgName, username, password, clientID, insecure) + + fmt.Println("successfully authenticated through refresh token") + case "verify-token": + verifyCmd.Parse(os.Args[2:]) + if vcdHost == "" { + flag.Usage() + panic(fmt.Errorf("--vcd-host is a mandatory parameter")) + } + + if orgName == "" { + flag.Usage() + panic(fmt.Errorf("--org is a mandatory parameter")) + } + if refreshToken == "" { + flag.Usage() + panic(fmt.Errorf("please specify the refresh token using --refresh-token option")) + } + + // verify refresh token + fmt.Println("verifying refresh token ...") + err = verifyRefreshToken(vcdHost, orgName, refreshToken, insecure) + if err != nil { + panic(fmt.Errorf("failed to verify refresh token: [%v]", err)) + } + default: + panic(fmt.Errorf("invalid sub-command")) + } +} + +func createRefreshToken(vcdHost, orgName, username, password, oauthClientName string, insecure bool) (string, string, error) { + href := fmt.Sprintf("%s/api", vcdHost) + u, err := url.ParseRequestURI(href) + if err != nil { + return "", "", fmt.Errorf("unable to parse url [%s]: %s", href, err) + } + vcdClient := govcd.NewVCDClient(*u, insecure) + vcdClient.Client.APIVersion = VCloudApiVersion + + resp, err := vcdClient.GetAuthResponse(username, password, orgName) + if err != nil { + return "", "", fmt.Errorf("unable to authenticate [%s/%s] for url [%s]: [%+v] : [%v]", + orgName, username, href, resp, err) + } + + // create client request url + createOAuthClientRequestHref := fmt.Sprintf("%s/oauth/tenant/%s/register", vcdHost, orgName) + if strings.ToLower(orgName) == "system" { + createOAuthClientRequestHref = fmt.Sprintf("%s/oauth/provider/register", vcdHost) + } + createOauthClientUrl, err := url.ParseRequestURI(createOAuthClientRequestHref) + if err != nil { + return "", "", fmt.Errorf("failed to parse url to create oauth client: [%v]", err) + } + oauthClientRequestBody := map[string]interface{}{ + "client_name": oauthClientName, + } + b := new(bytes.Buffer) + if err = json.NewEncoder(b).Encode(oauthClientRequestBody); err != nil { + return "", "", fmt.Errorf("error creating request body to create oauth client [%s]", oauthClientName) + } + vcdClient.Client.SetCustomHeader(map[string]string{ + "Accept": "application/*;version=36.0", + "Content-Type": "application/json", + }) + createOauthReq := vcdClient.Client.NewRequest(nil, http.MethodPost, *createOauthClientUrl, b) + createOauthResp, err := vcdClient.Client.Http.Do(createOauthReq) + if err != nil { + return "", "", fmt.Errorf("error creating oauth client [%s]: [%v]", oauthClientName, err) + } + if createOauthResp.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("invalid response creating oauth client [%s]: [%v]", oauthClientName, err) + } + var 
createOauthRespBody map[string]interface{} + err = json.NewDecoder(createOauthResp.Body).Decode(&createOauthRespBody) + if err != nil { + return "", "", fmt.Errorf("failed to parse response body to map[string]interface{}: [%v]", err) + } + clientID := createOauthRespBody["client_id"].(string) + + // create refresh token + createApiTokenHref := fmt.Sprintf("%s/oauth/tenant/%s/token", vcdHost, orgName) + if strings.ToLower(orgName) == "system" { + createApiTokenHref = fmt.Sprintf("%s/oauth/provider/token", vcdHost) + } + createRefreshTokenUrl, err := url.ParseRequestURI(createApiTokenHref) + data := bytes.NewBufferString(fmt.Sprintf("client_id=%s&grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion=%s", clientID, vcdClient.Client.VCDToken)) + + vcdClient.Client.RemoveCustomHeader() + vcdClient.Client.SetCustomHeader(map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + "Accept": "application/*;version=36.0", + }) + createApiTokenReq := vcdClient.Client.NewRequest(nil, http.MethodPost, *createRefreshTokenUrl, data) + createApiTokenResp, err := vcdClient.Client.Http.Do(createApiTokenReq) + if err != nil { + return "", "", fmt.Errorf("error creating a refresh token: [%v]", err) + } + if createApiTokenResp.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("invalid response creating a refresh token: [%d]", createApiTokenResp.StatusCode) + } + var apiTokenRespBody map[string]interface{} + err = json.NewDecoder(createApiTokenResp.Body).Decode(&apiTokenRespBody) + if err != nil { + return "", "", fmt.Errorf("failed to parse response body to map[string]interface{}: [%v]", err) + } + vcdClient.Client.RemoveCustomHeader() + fmt.Println("Created refersh token with client name", oauthClientName, " and expiring on ", apiTokenRespBody["expires_in"].(float64)) + return apiTokenRespBody["refresh_token"].(string), clientID, nil +} + +func verifyRefreshToken(vcdHost, orgName, refreshToken string, insecure bool) error { + href := fmt.Sprintf("%s/api", vcdHost) + u, err := url.ParseRequestURI(href) + if err != nil { + return fmt.Errorf("unable to parse url [%s]: %s", href, err) + } + vcdClient := govcd.NewVCDClient(*u, insecure) + vcdClient.Client.APIVersion = VCloudApiVersion + + if err = vcdClient.SetToken(orgName, govcd.ApiTokenHeader, refreshToken); err != nil { + return fmt.Errorf("failed to set api token to vcd client: [%v]", err) + } + return nil +} + +func deleteRefreshToken(vcdHost, orgName, username, password, clientID string, insecure bool) error { + href := fmt.Sprintf("%s/api", vcdHost) + u, err := url.ParseRequestURI(href) + if err != nil { + return fmt.Errorf("unable to parse url [%s]: %s", href, err) + } + vcdClient := govcd.NewVCDClient(*u, insecure) + vcdClient.Client.APIVersion = VCloudApiVersion + + resp, err := vcdClient.GetAuthResponse(username, password, orgName) + if err != nil { + return fmt.Errorf("unable to authenticate [%s/%s] for url [%s]: [%+v] : [%v]", + orgName, username, href, resp, err) + } + + currentSessionUrl, err := vcdClient.Client.OpenApiBuildEndpoint(fmt.Sprintf("1.0.0/tokens/urn:vcloud:token:%s", clientID)) + if err != nil { + return fmt.Errorf("failed to construct delete token url [%v]", err) + } + + err = vcdClient.Client.OpenApiDeleteItem(vcdClient.Client.APIVersion, currentSessionUrl, url.Values{}, nil) + if err != nil { + return fmt.Errorf("error while deleting refresh token [%v]", err) + } + return nil +} diff --git a/scripts/token-log-collector/go.mod b/scripts/token-log-collector/go.mod new file mode 100644 index 
000000000..af69aef11 --- /dev/null +++ b/scripts/token-log-collector/go.mod @@ -0,0 +1,20 @@ +module gitlab.eng.vmware.com/shamasundara/token-log-collector + +go 1.17 + +require ( + github.com/vmware/go-vcloud-director/v2 v2.14.0-rc.3 + go.uber.org/zap v1.21.0 +) + +require ( + github.com/araddon/dateparse v0.0.0-20190622164848-0fb0a474d195 // indirect + github.com/hashicorp/go-version v1.2.0 // indirect + github.com/kr/pretty v0.2.1 // indirect + github.com/kr/text v0.1.0 // indirect + github.com/peterhellberg/link v1.1.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/scripts/token-log-collector/go.sum b/scripts/token-log-collector/go.sum new file mode 100644 index 000000000..bed5e68fc --- /dev/null +++ b/scripts/token-log-collector/go.sum @@ -0,0 +1,73 @@ +github.com/araddon/dateparse v0.0.0-20190622164848-0fb0a474d195 h1:c4mLfegoDw6OhSJXTd2jUEQgZUQuJWtocudb97Qn9EM= +github.com/araddon/dateparse v0.0.0-20190622164848-0fb0a474d195/go.mod h1:SLqhdZcd+dF3TEVL2RMoob5bBP5R1P1qkox+HtCBgGI= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/peterhellberg/link v1.1.0 h1:s2+RH8EGuI/mI4QwrWGSYQCRz7uNgip9BaM04HKu5kc= +github.com/peterhellberg/link v1.1.0/go.mod h1:gtSlOT4jmkY8P47hbTc8PTgiDDWpdPbFYl75keYyBB8= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/vmware/go-vcloud-director/v2 v2.14.0-rc.3 h1:VJolXzgomaRPrgzSr0EduuUtJIJEf5RdoLbktZFQqIc= +github.com/vmware/go-vcloud-director/v2 v2.14.0-rc.3/go.mod h1:2BS1yw61VN34WI0/nUYoInFvBc3Zcuf84d4ESiAAl68= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/scripts/token-log-collector/vendor/github.com/araddon/dateparse/.travis.yml b/scripts/token-log-collector/vendor/github.com/araddon/dateparse/.travis.yml new file mode 100644 index 000000000..f071cf95a --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/araddon/dateparse/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.10.x + - 1.11.x + +before_install: + - go get -t -v ./... + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/scripts/token-log-collector/vendor/github.com/araddon/dateparse/LICENSE b/scripts/token-log-collector/vendor/github.com/araddon/dateparse/LICENSE new file mode 100644 index 000000000..f675ed313 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/araddon/dateparse/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015-2017 Aaron Raddon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/scripts/token-log-collector/vendor/github.com/araddon/dateparse/README.md b/scripts/token-log-collector/vendor/github.com/araddon/dateparse/README.md new file mode 100644 index 000000000..005e456fc --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/araddon/dateparse/README.md @@ -0,0 +1,282 @@ +Go Date Parser +--------------------------- + +Parse many date strings without knowing format in advance. Uses a scanner to read bytes and use a state machine to find format. Much faster than shotgun based parse methods. See [bench_test.go](https://github.com/araddon/dateparse/blob/master/bench_test.go) for performance comparison. 
+ + +[![Code Coverage](https://codecov.io/gh/araddon/dateparse/branch/master/graph/badge.svg)](https://codecov.io/gh/araddon/dateparse) +[![GoDoc](https://godoc.org/github.com/araddon/dateparse?status.svg)](http://godoc.org/github.com/araddon/dateparse) +[![Build Status](https://travis-ci.org/araddon/dateparse.svg?branch=master)](https://travis-ci.org/araddon/dateparse) +[![Go ReportCard](https://goreportcard.com/badge/araddon/dateparse)](https://goreportcard.com/report/araddon/dateparse) + +**MM/DD/YYYY VS DD/MM/YYYY** Right now this uses mm/dd/yyyy WHEN ambiguous if this is not desired behavior, use `ParseStrict` which will fail on ambiguous date strings. + +**Timezones** The location your server is configured affects the results! See example or https://play.golang.org/p/IDHRalIyXh and last paragraph here https://golang.org/pkg/time/#Parse. + + +```go + +// Normal parse. Equivalent Timezone rules as time.Parse() +t, err := dateparse.ParseAny("3/1/2014") + +// Parse Strict, error on ambigous mm/dd vs dd/mm dates +t, err := dateparse.ParseStrict("3/1/2014") +> returns error + +// Return a string that represents the layout to parse the given date-time. +layout, err := dateparse.ParseFormat("May 8, 2009 5:57:51 PM") +> "Jan 2, 2006 3:04:05 PM" + +``` + +cli tool for testing dateformats +---------------------------------- + +[Date Parse CLI](https://github.com/araddon/dateparse/blob/master/dateparse) + + +Extended example +------------------- + +https://github.com/araddon/dateparse/blob/master/example/main.go + +```go +package main + +import ( + "flag" + "fmt" + "time" + + "github.com/apcera/termtables" + "github.com/araddon/dateparse" +) + +var examples = []string{ + "May 8, 2009 5:57:51 PM", + "oct 7, 1970", + "oct 7, '70", + "oct. 7, 1970", + "oct. 
7, 70", + "Mon Jan 2 15:04:05 2006", + "Mon Jan 2 15:04:05 MST 2006", + "Mon Jan 02 15:04:05 -0700 2006", + "Monday, 02-Jan-06 15:04:05 MST", + "Mon, 02 Jan 2006 15:04:05 MST", + "Tue, 11 Jul 2017 16:28:13 +0200 (CEST)", + "Mon, 02 Jan 2006 15:04:05 -0700", + "Thu, 4 Jan 2018 17:53:36 +0000", + "Mon Aug 10 15:44:11 UTC+0100 2015", + "Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time)", + "September 17, 2012 10:09am", + "September 17, 2012 at 10:09am PST-08", + "September 17, 2012, 10:10:09", + "October 7, 1970", + "October 7th, 1970", + "12 Feb 2006, 19:17", + "12 Feb 2006 19:17", + "7 oct 70", + "7 oct 1970", + "03 February 2013", + "1 July 2013", + "2013-Feb-03", + // mm/dd/yy + "3/31/2014", + "03/31/2014", + "08/21/71", + "8/1/71", + "4/8/2014 22:05", + "04/08/2014 22:05", + "4/8/14 22:05", + "04/2/2014 03:00:51", + "8/8/1965 12:00:00 AM", + "8/8/1965 01:00:01 PM", + "8/8/1965 01:00 PM", + "8/8/1965 1:00 PM", + "8/8/1965 12:00 AM", + "4/02/2014 03:00:51", + "03/19/2012 10:11:59", + "03/19/2012 10:11:59.3186369", + // yyyy/mm/dd + "2014/3/31", + "2014/03/31", + "2014/4/8 22:05", + "2014/04/08 22:05", + "2014/04/2 03:00:51", + "2014/4/02 03:00:51", + "2012/03/19 10:11:59", + "2012/03/19 10:11:59.3186369", + // Chinese + "2014年04月08日", + // yyyy-mm-ddThh + "2006-01-02T15:04:05+0000", + "2009-08-12T22:15:09-07:00", + "2009-08-12T22:15:09", + "2009-08-12T22:15:09Z", + // yyyy-mm-dd hh:mm:ss + "2014-04-26 17:24:37.3186369", + "2012-08-03 18:31:59.257000000", + "2014-04-26 17:24:37.123", + "2013-04-01 22:43", + "2013-04-01 22:43:22", + "2014-12-16 06:20:00 UTC", + "2014-12-16 06:20:00 GMT", + "2014-04-26 05:24:37 PM", + "2014-04-26 13:13:43 +0800", + "2014-04-26 13:13:43 +0800 +08", + "2014-04-26 13:13:44 +09:00", + "2012-08-03 18:31:59.257000000 +0000 UTC", + "2015-09-30 18:48:56.35272715 +0000 UTC", + "2015-02-18 00:12:00 +0000 GMT", + "2015-02-18 00:12:00 +0000 UTC", + "2015-02-08 03:02:00 +0300 MSK m=+0.000000001", + "2015-02-08 03:02:00.001 +0300 MSK m=+0.000000001", + "2017-07-19 03:21:51+00:00", + "2014-04-26", + "2014-04", + "2014", + "2014-05-11 08:20:13,787", + // mm.dd.yy + "3.31.2014", + "03.31.2014", + "08.21.71", + "2014.03", + "2014.03.30", + // yyyymmdd and similar + "20140601", + "20140722105203", + // unix seconds, ms, micro, nano + "1332151919", + "1384216367189", + "1384216367111222", + "1384216367111222333", +} + +var ( + timezone = "" +) + +func main() { + flag.StringVar(&timezone, "timezone", "UTC", "Timezone aka `America/Los_Angeles` formatted time-zone") + flag.Parse() + + if timezone != "" { + // NOTE: This is very, very important to understand + // time-parsing in go + loc, err := time.LoadLocation(timezone) + if err != nil { + panic(err.Error()) + } + time.Local = loc + } + + table := termtables.CreateTable() + + table.AddHeaders("Input", "Parsed, and Output as %v") + for _, dateExample := range examples { + t, err := dateparse.ParseLocal(dateExample) + if err != nil { + panic(err.Error()) + } + table.AddRow(dateExample, fmt.Sprintf("%v", t)) + } + fmt.Println(table.Render()) +} + +/* ++-------------------------------------------------------+-----------------------------------------+ +| Input | Parsed, and Output as %v | ++-------------------------------------------------------+-----------------------------------------+ +| May 8, 2009 5:57:51 PM | 2009-05-08 17:57:51 +0000 UTC | +| oct 7, 1970 | 1970-10-07 00:00:00 +0000 UTC | +| oct 7, '70 | 1970-10-07 00:00:00 +0000 UTC | +| oct. 7, 1970 | 1970-10-07 00:00:00 +0000 UTC | +| oct. 
7, 70 | 1970-10-07 00:00:00 +0000 UTC | +| Mon Jan 2 15:04:05 2006 | 2006-01-02 15:04:05 +0000 UTC | +| Mon Jan 2 15:04:05 MST 2006 | 2006-01-02 15:04:05 +0000 MST | +| Mon Jan 02 15:04:05 -0700 2006 | 2006-01-02 15:04:05 -0700 -0700 | +| Monday, 02-Jan-06 15:04:05 MST | 2006-01-02 15:04:05 +0000 MST | +| Mon, 02 Jan 2006 15:04:05 MST | 2006-01-02 15:04:05 +0000 MST | +| Tue, 11 Jul 2017 16:28:13 +0200 (CEST) | 2017-07-11 16:28:13 +0200 +0200 | +| Mon, 02 Jan 2006 15:04:05 -0700 | 2006-01-02 15:04:05 -0700 -0700 | +| Thu, 4 Jan 2018 17:53:36 +0000 | 2018-01-04 17:53:36 +0000 UTC | +| Mon Aug 10 15:44:11 UTC+0100 2015 | 2015-08-10 15:44:11 +0000 UTC | +| Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) | 2015-07-03 18:04:07 +0100 GMT | +| September 17, 2012 10:09am | 2012-09-17 10:09:00 +0000 UTC | +| September 17, 2012 at 10:09am PST-08 | 2012-09-17 10:09:00 -0800 PST | +| September 17, 2012, 10:10:09 | 2012-09-17 10:10:09 +0000 UTC | +| October 7, 1970 | 1970-10-07 00:00:00 +0000 UTC | +| October 7th, 1970 | 1970-10-07 00:00:00 +0000 UTC | +| 12 Feb 2006, 19:17 | 2006-02-12 19:17:00 +0000 UTC | +| 12 Feb 2006 19:17 | 2006-02-12 19:17:00 +0000 UTC | +| 7 oct 70 | 1970-10-07 00:00:00 +0000 UTC | +| 7 oct 1970 | 1970-10-07 00:00:00 +0000 UTC | +| 03 February 2013 | 2013-02-03 00:00:00 +0000 UTC | +| 1 July 2013 | 2013-07-01 00:00:00 +0000 UTC | +| 2013-Feb-03 | 2013-02-03 00:00:00 +0000 UTC | +| 3/31/2014 | 2014-03-31 00:00:00 +0000 UTC | +| 03/31/2014 | 2014-03-31 00:00:00 +0000 UTC | +| 08/21/71 | 1971-08-21 00:00:00 +0000 UTC | +| 8/1/71 | 1971-08-01 00:00:00 +0000 UTC | +| 4/8/2014 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 04/08/2014 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 4/8/14 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 04/2/2014 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 8/8/1965 12:00:00 AM | 1965-08-08 00:00:00 +0000 UTC | +| 8/8/1965 01:00:01 PM | 1965-08-08 13:00:01 +0000 UTC | +| 8/8/1965 01:00 PM | 1965-08-08 13:00:00 +0000 UTC | +| 8/8/1965 1:00 PM | 1965-08-08 13:00:00 +0000 UTC | +| 8/8/1965 12:00 AM | 1965-08-08 00:00:00 +0000 UTC | +| 4/02/2014 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 03/19/2012 10:11:59 | 2012-03-19 10:11:59 +0000 UTC | +| 03/19/2012 10:11:59.3186369 | 2012-03-19 10:11:59.3186369 +0000 UTC | +| 2014/3/31 | 2014-03-31 00:00:00 +0000 UTC | +| 2014/03/31 | 2014-03-31 00:00:00 +0000 UTC | +| 2014/4/8 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 2014/04/08 22:05 | 2014-04-08 22:05:00 +0000 UTC | +| 2014/04/2 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 2014/4/02 03:00:51 | 2014-04-02 03:00:51 +0000 UTC | +| 2012/03/19 10:11:59 | 2012-03-19 10:11:59 +0000 UTC | +| 2012/03/19 10:11:59.3186369 | 2012-03-19 10:11:59.3186369 +0000 UTC | +| 2014年04月08日 | 2014-04-08 00:00:00 +0000 UTC | +| 2006-01-02T15:04:05+0000 | 2006-01-02 15:04:05 +0000 UTC | +| 2009-08-12T22:15:09-07:00 | 2009-08-12 22:15:09 -0700 -0700 | +| 2009-08-12T22:15:09 | 2009-08-12 22:15:09 +0000 UTC | +| 2009-08-12T22:15:09Z | 2009-08-12 22:15:09 +0000 UTC | +| 2014-04-26 17:24:37.3186369 | 2014-04-26 17:24:37.3186369 +0000 UTC | +| 2012-08-03 18:31:59.257000000 | 2012-08-03 18:31:59.257 +0000 UTC | +| 2014-04-26 17:24:37.123 | 2014-04-26 17:24:37.123 +0000 UTC | +| 2013-04-01 22:43 | 2013-04-01 22:43:00 +0000 UTC | +| 2013-04-01 22:43:22 | 2013-04-01 22:43:22 +0000 UTC | +| 2014-12-16 06:20:00 UTC | 2014-12-16 06:20:00 +0000 UTC | +| 2014-12-16 06:20:00 GMT | 2014-12-16 06:20:00 +0000 UTC | +| 2014-04-26 05:24:37 PM | 2014-04-26 17:24:37 +0000 UTC | +| 2014-04-26 13:13:43 +0800 | 
2014-04-26 13:13:43 +0800 +0800 | +| 2014-04-26 13:13:43 +0800 +08 | 2014-04-26 13:13:43 +0800 +0800 | +| 2014-04-26 13:13:44 +09:00 | 2014-04-26 13:13:44 +0900 +0900 | +| 2012-08-03 18:31:59.257000000 +0000 UTC | 2012-08-03 18:31:59.257 +0000 UTC | +| 2015-09-30 18:48:56.35272715 +0000 UTC | 2015-09-30 18:48:56.35272715 +0000 UTC | +| 2015-02-18 00:12:00 +0000 GMT | 2015-02-18 00:12:00 +0000 UTC | +| 2015-02-18 00:12:00 +0000 UTC | 2015-02-18 00:12:00 +0000 UTC | +| 2015-02-08 03:02:00 +0300 MSK m=+0.000000001 | 2015-02-08 03:02:00 +0300 +0300 | +| 2015-02-08 03:02:00.001 +0300 MSK m=+0.000000001 | 2015-02-08 03:02:00.001 +0300 +0300 | +| 2017-07-19 03:21:51+00:00 | 2017-07-19 03:21:51 +0000 UTC | +| 2014-04-26 | 2014-04-26 00:00:00 +0000 UTC | +| 2014-04 | 2014-04-01 00:00:00 +0000 UTC | +| 2014 | 2014-01-01 00:00:00 +0000 UTC | +| 2014-05-11 08:20:13,787 | 2014-05-11 08:20:13.787 +0000 UTC | +| 3.31.2014 | 2014-03-31 00:00:00 +0000 UTC | +| 03.31.2014 | 2014-03-31 00:00:00 +0000 UTC | +| 08.21.71 | 1971-08-21 00:00:00 +0000 UTC | +| 2014.03 | 2014-03-01 00:00:00 +0000 UTC | +| 2014.03.30 | 2014-03-30 00:00:00 +0000 UTC | +| 20140601 | 2014-06-01 00:00:00 +0000 UTC | +| 20140722105203 | 2014-07-22 10:52:03 +0000 UTC | +| 1332151919 | 2012-03-19 10:11:59 +0000 UTC | +| 1384216367189 | 2013-11-12 00:32:47.189 +0000 UTC | +| 1384216367111222 | 2013-11-12 00:32:47.111222 +0000 UTC | +| 1384216367111222333 | 2013-11-12 00:32:47.111222333 +0000 UTC | ++-------------------------------------------------------+-----------------------------------------+ +*/ + +``` diff --git a/scripts/token-log-collector/vendor/github.com/araddon/dateparse/parseany.go b/scripts/token-log-collector/vendor/github.com/araddon/dateparse/parseany.go new file mode 100644 index 000000000..5e66aa6d6 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/araddon/dateparse/parseany.go @@ -0,0 +1,1864 @@ +// Package dateparse parses date-strings without knowing the format +// in advance, using a fast lex based approach to eliminate shotgun +// attempts. It leans towards US style dates when there is a conflict. 
+package dateparse + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +// func init() { +// gou.SetupLogging("debug") +// gou.SetColorOutput() +// } + +var months = []string{ + "january", + "february", + "march", + "april", + "may", + "june", + "july", + "august", + "september", + "october", + "november", + "december", +} + +type dateState uint8 +type timeState uint8 + +const ( + dateStart dateState = iota // 0 + dateDigit + dateYearDash + dateYearDashAlphaDash + dateYearDashDash + dateYearDashDashWs // 5 + dateYearDashDashT + dateDigitDash + dateDigitDashAlpha + dateDigitDashAlphaDash + dateDigitDot // 10 + dateDigitDotDot + dateDigitSlash + dateDigitChineseYear + dateDigitChineseYearWs + dateDigitWs // 15 + dateDigitWsMoYear + dateDigitWsMolong + dateAlpha + dateAlphaWs + dateAlphaWsDigit // 20 + dateAlphaWsDigitMore + dateAlphaWsDigitMoreWs + dateAlphaWsDigitMoreWsYear + dateAlphaWsMonth + dateAlphaWsMonthMore + dateAlphaWsMonthSuffix + dateAlphaWsMore + dateAlphaWsAtTime + dateAlphaWsAlpha + dateAlphaWsAlphaYearmaybe + dateAlphaPeriodWsDigit + dateWeekdayComma + dateWeekdayAbbrevComma +) +const ( + // Time state + timeIgnore timeState = iota // 0 + timeStart + timeWs + timeWsAlpha + timeWsAlphaWs + timeWsAlphaZoneOffset // 5 + timeWsAlphaZoneOffsetWs + timeWsAlphaZoneOffsetWsYear + timeWsAlphaZoneOffsetWsExtra + timeWsAMPMMaybe + timeWsAMPM // 10 + timeWsOffset + timeWsOffsetWs // 12 + timeWsOffsetColonAlpha + timeWsOffsetColon + timeWsYear // 15 + timeOffset + timeOffsetColon + timeAlpha + timePeriod + timePeriodOffset // 20 + timePeriodOffsetColon + timePeriodOffsetColonWs + timePeriodWs + timePeriodWsAlpha + timePeriodWsOffset // 25 + timePeriodWsOffsetWs + timePeriodWsOffsetWsAlpha + timePeriodWsOffsetColon + timePeriodWsOffsetColonAlpha + timeZ + timeZDigit +) + +var ( + // ErrAmbiguousMMDD for date formats such as 04/02/2014 the mm/dd vs dd/mm are + // ambiguous, so it is an error for strict parse rules. + ErrAmbiguousMMDD = fmt.Errorf("This date has ambiguous mm/dd vs dd/mm type format") +) + +func unknownErr(datestr string) error { + return fmt.Errorf("Could not find format for %q", datestr) +} + +// ParseAny parse an unknown date format, detect the layout. +// Normal parse. Equivalent Timezone rules as time.Parse(). +// NOTE: please see readme on mmdd vs ddmm ambiguous dates. +func ParseAny(datestr string) (time.Time, error) { + p, err := parseTime(datestr, nil) + if err != nil { + return time.Time{}, err + } + return p.parse() +} + +// ParseIn with Location, equivalent to time.ParseInLocation() timezone/offset +// rules. Using location arg, if timezone/offset info exists in the +// datestring, it uses the given location rules for any zone interpretation. +// That is, MST means one thing when using America/Denver and something else +// in other locations. +func ParseIn(datestr string, loc *time.Location) (time.Time, error) { + p, err := parseTime(datestr, loc) + if err != nil { + return time.Time{}, err + } + return p.parse() +} + +// ParseLocal Given an unknown date format, detect the layout, +// using time.Local, parse. +// +// Set Location to time.Local. Same as ParseIn Location but lazily uses +// the global time.Local variable for Location argument. 
+// +// denverLoc, _ := time.LoadLocation("America/Denver") +// time.Local = denverLoc +// +// t, err := dateparse.ParseLocal("3/1/2014") +// +// Equivalent to: +// +// t, err := dateparse.ParseIn("3/1/2014", denverLoc) +// +func ParseLocal(datestr string) (time.Time, error) { + p, err := parseTime(datestr, time.Local) + if err != nil { + return time.Time{}, err + } + return p.parse() +} + +// MustParse parse a date, and panic if it can't be parsed. Used for testing. +// Not recommended for most use-cases. +func MustParse(datestr string) time.Time { + p, err := parseTime(datestr, nil) + if err != nil { + panic(err.Error()) + } + t, err := p.parse() + if err != nil { + panic(err.Error()) + } + return t +} + +// ParseFormat parse's an unknown date-time string and returns a layout +// string that can parse this (and exact same format) other date-time strings. +// +// layout, err := dateparse.ParseFormat("2013-02-01 00:00:00") +// // layout = "2006-01-02 15:04:05" +// +func ParseFormat(datestr string) (string, error) { + p, err := parseTime(datestr, nil) + if err != nil { + return "", err + } + _, err = p.parse() + if err != nil { + return "", err + } + return string(p.format), nil +} + +// ParseStrict parse an unknown date format. IF the date is ambigous +// mm/dd vs dd/mm then return an error. These return errors: 3.3.2014 , 8/8/71 etc +func ParseStrict(datestr string) (time.Time, error) { + p, err := parseTime(datestr, nil) + if err != nil { + return time.Time{}, err + } + if p.ambiguousMD { + return time.Time{}, ErrAmbiguousMMDD + } + return p.parse() +} + +func parseTime(datestr string, loc *time.Location) (*parser, error) { + + p := newParser(datestr, loc) + i := 0 + + // General strategy is to read rune by rune through the date looking for + // certain hints of what type of date we are dealing with. 
+ // Hopefully we only need to read about 5 or 6 bytes before + // we figure it out and then attempt a parse +iterRunes: + for ; i < len(datestr); i++ { + //r := rune(datestr[i]) + r, bytesConsumed := utf8.DecodeRuneInString(datestr[i:]) + if bytesConsumed > 1 { + i += (bytesConsumed - 1) + } + + //gou.Debugf("i=%d r=%s state=%d %s", i, string(r), p.stateDate, datestr) + switch p.stateDate { + case dateStart: + if unicode.IsDigit(r) { + p.stateDate = dateDigit + } else if unicode.IsLetter(r) { + p.stateDate = dateAlpha + } else { + return nil, unknownErr(datestr) + } + case dateDigit: + + switch r { + case '-', '\u2212': + // 2006-01-02 + // 2013-Feb-03 + // 13-Feb-03 + // 29-Jun-2016 + if i == 4 { + p.stateDate = dateYearDash + p.yeari = 0 + p.yearlen = i + p.moi = i + 1 + p.set(0, "2006") + } else { + p.stateDate = dateDigitDash + } + case '/': + // 03/31/2005 + // 2014/02/24 + p.stateDate = dateDigitSlash + if i == 4 { + p.yearlen = i + p.moi = i + 1 + p.setYear() + } else { + p.ambiguousMD = true + if p.preferMonthFirst { + if p.molen == 0 { + p.molen = i + p.setMonth() + p.dayi = i + 1 + } + } + } + + case '.': + // 3.31.2014 + // 08.21.71 + // 2014.05 + p.stateDate = dateDigitDot + if i == 4 { + p.yearlen = i + p.moi = i + 1 + p.setYear() + } else { + p.ambiguousMD = true + p.moi = 0 + p.molen = i + p.setMonth() + p.dayi = i + 1 + } + + case ' ': + // 18 January 2018 + // 8 January 2018 + // 8 jan 2018 + // 02 Jan 2018 23:59 + // 02 Jan 2018 23:59:34 + // 12 Feb 2006, 19:17 + // 12 Feb 2006, 19:17:22 + p.stateDate = dateDigitWs + p.dayi = 0 + p.daylen = i + case '年': + // Chinese Year + p.stateDate = dateDigitChineseYear + case ',': + return nil, unknownErr(datestr) + default: + continue + } + p.part1Len = i + + case dateYearDash: + // dateYearDashDashT + // 2006-01-02T15:04:05Z07:00 + // dateYearDashDashWs + // 2013-04-01 22:43:22 + // dateYearDashAlphaDash + // 2013-Feb-03 + switch r { + case '-': + p.molen = i - p.moi + p.dayi = i + 1 + p.stateDate = dateYearDashDash + p.setMonth() + default: + if unicode.IsLetter(r) { + p.stateDate = dateYearDashAlphaDash + } + } + + case dateYearDashDash: + // dateYearDashDashT + // 2006-01-02T15:04:05Z07:00 + // dateYearDashDashWs + // 2013-04-01 22:43:22 + switch r { + case ' ': + p.daylen = i - p.dayi + p.stateDate = dateYearDashDashWs + p.stateTime = timeStart + p.setDay() + break iterRunes + case 'T': + p.daylen = i - p.dayi + p.stateDate = dateYearDashDashT + p.stateTime = timeStart + p.setDay() + break iterRunes + } + case dateYearDashAlphaDash: + // 2013-Feb-03 + switch r { + case '-': + p.molen = i - p.moi + p.set(p.moi, "Jan") + p.dayi = i + 1 + } + case dateDigitDash: + // 13-Feb-03 + // 29-Jun-2016 + if unicode.IsLetter(r) { + p.stateDate = dateDigitDashAlpha + p.moi = i + } else { + return nil, unknownErr(datestr) + } + case dateDigitDashAlpha: + // 13-Feb-03 + // 28-Feb-03 + // 29-Jun-2016 + switch r { + case '-': + p.molen = i - p.moi + p.set(p.moi, "Jan") + p.yeari = i + 1 + p.stateDate = dateDigitDashAlphaDash + } + + case dateDigitDashAlphaDash: + // 13-Feb-03 ambiguous + // 28-Feb-03 ambiguous + // 29-Jun-2016 + switch r { + case ' ': + // we need to find if this was 4 digits, aka year + // or 2 digits which makes it ambiguous year/day + length := i - (p.moi + p.molen + 1) + if length == 4 { + p.yearlen = 4 + p.set(p.yeari, "2006") + // We now also know that part1 was the day + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + } else if length == 2 { + // We have no idea if this is + // yy-mon-dd OR dd-mon-yy + // + // We are 
going to ASSUME (bad, bad) that it is dd-mon-yy which is a horible assumption + p.ambiguousMD = true + p.yearlen = 2 + p.set(p.yeari, "06") + // We now also know that part1 was the day + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + } + p.stateTime = timeStart + break iterRunes + } + + case dateDigitSlash: + // 2014/07/10 06:55:38.156283 + // 03/19/2012 10:11:59 + // 04/2/2014 03:00:37 + // 3/1/2012 10:11:59 + // 4/8/2014 22:05 + // 3/1/2014 + // 10/13/2014 + // 01/02/2006 + // 1/2/06 + + switch r { + case ' ': + p.stateTime = timeStart + if p.yearlen == 0 { + p.yearlen = i - p.yeari + p.setYear() + } else if p.daylen == 0 { + p.daylen = i - p.dayi + p.setDay() + } + break iterRunes + case '/': + if p.yearlen > 0 { + // 2014/07/10 06:55:38.156283 + if p.molen == 0 { + p.molen = i - p.moi + p.setMonth() + p.dayi = i + 1 + } + } else if p.preferMonthFirst { + if p.daylen == 0 { + p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 1 + } + } + } + + case dateDigitWs: + // 18 January 2018 + // 8 January 2018 + // 8 jan 2018 + // 1 jan 18 + // 02 Jan 2018 23:59 + // 02 Jan 2018 23:59:34 + // 12 Feb 2006, 19:17 + // 12 Feb 2006, 19:17:22 + switch r { + case ' ': + p.yeari = i + 1 + //p.yearlen = 4 + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + p.stateTime = timeStart + if i > p.daylen+len(" Sep") { // November etc + // If len greather than space + 3 it must be full month + p.stateDate = dateDigitWsMolong + } else { + // If len=3, the might be Feb or May? Ie ambigous abbreviated but + // we can parse may with either. BUT, that means the + // format may not be correct? + // mo := strings.ToLower(datestr[p.daylen+1 : i]) + p.moi = p.daylen + 1 + p.molen = i - p.moi + p.set(p.moi, "Jan") + p.stateDate = dateDigitWsMoYear + } + } + + case dateDigitWsMoYear: + // 8 jan 2018 + // 02 Jan 2018 23:59 + // 02 Jan 2018 23:59:34 + // 12 Feb 2006, 19:17 + // 12 Feb 2006, 19:17:22 + switch r { + case ',': + p.yearlen = i - p.yeari + p.setYear() + i++ + break iterRunes + case ' ': + p.yearlen = i - p.yeari + p.setYear() + break iterRunes + } + case dateDigitWsMolong: + // 18 January 2018 + // 8 January 2018 + + case dateDigitChineseYear: + // dateDigitChineseYear + // 2014年04月08日 + // weekday %Y年%m月%e日 %A %I:%M %p + // 2013年07月18日 星期四 10:27 上午 + if r == ' ' { + p.stateDate = dateDigitChineseYearWs + break + } + case dateDigitDot: + // This is the 2nd period + // 3.31.2014 + // 08.21.71 + // 2014.05 + // 2018.09.30 + if r == '.' { + if p.moi == 0 { + // 3.31.2014 + p.daylen = i - p.dayi + p.yeari = i + 1 + p.setDay() + p.stateDate = dateDigitDotDot + } else { + // 2018.09.30 + //p.molen = 2 + p.molen = i - p.moi + p.dayi = i + 1 + p.setMonth() + p.stateDate = dateDigitDotDot + } + } + case dateDigitDotDot: + // iterate all the way through + case dateAlpha: + // dateAlphaWS + // Mon Jan _2 15:04:05 2006 + // Mon Jan _2 15:04:05 MST 2006 + // Mon Jan 02 15:04:05 -0700 2006 + // Mon Aug 10 15:44:11 UTC+0100 2015 + // Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) + // dateAlphaWSDigit + // May 8, 2009 5:57:51 PM + // oct 1, 1970 + // dateAlphaWsMonth + // April 8, 2009 + // dateAlphaWsMore + // dateAlphaWsAtTime + // January 02, 2006 at 3:04pm MST-07 + // + // dateAlphaPeriodWsDigit + // oct. 
1, 1970 + // dateWeekdayComma + // Monday, 02 Jan 2006 15:04:05 MST + // Monday, 02-Jan-06 15:04:05 MST + // Monday, 02 Jan 2006 15:04:05 -0700 + // Monday, 02 Jan 2006 15:04:05 +0100 + // dateWeekdayAbbrevComma + // Mon, 02 Jan 2006 15:04:05 MST + // Mon, 02 Jan 2006 15:04:05 -0700 + // Thu, 13 Jul 2017 08:58:40 +0100 + // Tue, 11 Jul 2017 16:28:13 +0200 (CEST) + // Mon, 02-Jan-06 15:04:05 MST + switch { + case r == ' ': + // X + // April 8, 2009 + if i > 3 { + // Check to see if the alpha is name of month? or Day? + month := strings.ToLower(datestr[0:i]) + if isMonthFull(month) { + p.fullMonth = month + // len(" 31, 2018") = 9 + if len(datestr[i:]) < 10 { + // April 8, 2009 + p.stateDate = dateAlphaWsMonth + } else { + p.stateDate = dateAlphaWsMore + } + p.dayi = i + 1 + break + } + + } else { + // This is possibly ambiguous? May will parse as either though. + // So, it could return in-correct format. + // May 05, 2005, 05:05:05 + // May 05 2005, 05:05:05 + // Jul 05, 2005, 05:05:05 + p.stateDate = dateAlphaWs + } + + case r == ',': + // Mon, 02 Jan 2006 + // p.moi = 0 + // p.molen = i + if i == 3 { + p.stateDate = dateWeekdayAbbrevComma + p.set(0, "Mon") + } else { + p.stateDate = dateWeekdayComma + p.skip = i + 2 + i++ + // TODO: lets just make this "skip" as we don't need + // the mon, monday, they are all superfelous and not needed + // just lay down the skip, no need to fill and then skip + } + case r == '.': + // sept. 28, 2017 + // jan. 28, 2017 + p.stateDate = dateAlphaPeriodWsDigit + if i == 3 { + p.molen = i + p.set(0, "Jan") + } else if i == 4 { + // gross + datestr = datestr[0:i-1] + datestr[i:] + return parseTime(datestr, loc) + } else { + return nil, unknownErr(datestr) + } + } + + case dateAlphaWs: + // dateAlphaWsAlpha + // Mon Jan _2 15:04:05 2006 + // Mon Jan _2 15:04:05 MST 2006 + // Mon Jan 02 15:04:05 -0700 2006 + // Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) + // Mon Aug 10 15:44:11 UTC+0100 2015 + // dateAlphaWsDigit + // May 8, 2009 5:57:51 PM + // May 8 2009 5:57:51 PM + // oct 1, 1970 + // oct 7, '70 + switch { + case unicode.IsLetter(r): + p.set(0, "Mon") + p.stateDate = dateAlphaWsAlpha + p.set(i, "Jan") + case unicode.IsDigit(r): + p.set(0, "Jan") + p.stateDate = dateAlphaWsDigit + p.dayi = i + } + + case dateAlphaWsDigit: + // May 8, 2009 5:57:51 PM + // May 8 2009 5:57:51 PM + // oct 1, 1970 + // oct 7, '70 + // oct. 
7, 1970 + if r == ',' { + p.daylen = i - p.dayi + p.setDay() + p.stateDate = dateAlphaWsDigitMore + } else if r == ' ' { + p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 1 + p.stateDate = dateAlphaWsDigitMoreWs + } else if unicode.IsLetter(r) { + p.stateDate = dateAlphaWsMonthSuffix + i-- + } + case dateAlphaWsDigitMore: + // x + // May 8, 2009 5:57:51 PM + // May 05, 2005, 05:05:05 + // May 05 2005, 05:05:05 + // oct 1, 1970 + // oct 7, '70 + if r == ' ' { + p.yeari = i + 1 + p.stateDate = dateAlphaWsDigitMoreWs + } + case dateAlphaWsDigitMoreWs: + // x + // May 8, 2009 5:57:51 PM + // May 05, 2005, 05:05:05 + // oct 1, 1970 + // oct 7, '70 + switch r { + case '\'': + p.yeari = i + 1 + case ' ', ',': + // x + // May 8, 2009 5:57:51 PM + // x + // May 8, 2009, 5:57:51 PM + p.stateDate = dateAlphaWsDigitMoreWsYear + p.yearlen = i - p.yeari + p.setYear() + p.stateTime = timeStart + break iterRunes + } + + case dateAlphaWsAlpha: + // Mon Jan _2 15:04:05 2006 + // Mon Jan 02 15:04:05 -0700 2006 + // Mon Jan _2 15:04:05 MST 2006 + // Mon Aug 10 15:44:11 UTC+0100 2015 + // Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) + if r == ' ' { + if p.dayi > 0 { + p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 1 + p.stateDate = dateAlphaWsAlphaYearmaybe + p.stateTime = timeStart + } + } else if unicode.IsDigit(r) { + if p.dayi == 0 { + p.dayi = i + } + } + + case dateAlphaWsAlphaYearmaybe: + // x + // Mon Jan _2 15:04:05 2006 + // Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) + if r == ':' { + i = i - 3 + p.stateDate = dateAlphaWsAlpha + p.yeari = 0 + break iterRunes + } else if r == ' ' { + // must be year format, not 15:04 + p.yearlen = i - p.yeari + p.setYear() + break iterRunes + } + + case dateAlphaWsMonth: + // April 8, 2009 + // April 8 2009 + switch r { + case ' ', ',': + // x + // June 8, 2009 + // x + // June 8 2009 + if p.daylen == 0 { + p.daylen = i - p.dayi + p.setDay() + } + case 's', 'S', 'r', 'R', 't', 'T', 'n', 'N': + // st, rd, nd, st + i-- + p.stateDate = dateAlphaWsMonthSuffix + default: + if p.daylen > 0 && p.yeari == 0 { + p.yeari = i + } + } + case dateAlphaWsMonthMore: + // X + // January 02, 2006, 15:04:05 + // January 02 2006, 15:04:05 + // January 02, 2006 15:04:05 + // January 02 2006 15:04:05 + switch r { + case ',': + p.yearlen = i - p.yeari + p.setYear() + p.stateTime = timeStart + i++ + break iterRunes + case ' ': + p.yearlen = i - p.yeari + p.setYear() + p.stateTime = timeStart + break iterRunes + } + case dateAlphaWsMonthSuffix: + // x + // April 8th, 2009 + // April 8th 2009 + switch r { + case 't', 'T': + if p.nextIs(i, 'h') || p.nextIs(i, 'H') { + if len(datestr) > i+2 { + return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc) + } + } + case 'n', 'N': + if p.nextIs(i, 'd') || p.nextIs(i, 'D') { + if len(datestr) > i+2 { + return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc) + } + } + case 's', 'S': + if p.nextIs(i, 't') || p.nextIs(i, 'T') { + if len(datestr) > i+2 { + return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc) + } + } + case 'r', 'R': + if p.nextIs(i, 'd') || p.nextIs(i, 'D') { + if len(datestr) > i+2 { + return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc) + } + } + } + case dateAlphaWsMore: + // January 02, 2006, 15:04:05 + // January 02 2006, 15:04:05 + // January 2nd, 2006, 15:04:05 + // January 2nd 2006, 15:04:05 + // September 17, 2012 at 5:00pm UTC-05 + switch { + case r == ',': + // x + // January 02, 2006, 15:04:05 + if p.nextIs(i, ' ') { 
+ p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 2 + p.stateDate = dateAlphaWsMonthMore + i++ + } + + case r == ' ': + // x + // January 02 2006, 15:04:05 + p.daylen = i - p.dayi + p.setDay() + p.yeari = i + 1 + p.stateDate = dateAlphaWsMonthMore + case unicode.IsDigit(r): + // XX + // January 02, 2006, 15:04:05 + continue + case unicode.IsLetter(r): + // X + // January 2nd, 2006, 15:04:05 + p.daylen = i - p.dayi + p.setDay() + p.stateDate = dateAlphaWsMonthSuffix + i-- + } + + case dateAlphaPeriodWsDigit: + // oct. 7, '70 + switch { + case r == ' ': + // continue + case unicode.IsDigit(r): + p.stateDate = dateAlphaWsDigit + p.dayi = i + default: + return p, unknownErr(datestr) + } + case dateWeekdayComma: + // Monday, 02 Jan 2006 15:04:05 MST + // Monday, 02 Jan 2006 15:04:05 -0700 + // Monday, 02 Jan 2006 15:04:05 +0100 + // Monday, 02-Jan-06 15:04:05 MST + if p.dayi == 0 { + p.dayi = i + } + switch r { + case ' ', '-': + if p.moi == 0 { + p.moi = i + 1 + p.daylen = i - p.dayi + p.setDay() + } else if p.yeari == 0 { + p.yeari = i + 1 + p.molen = i - p.moi + p.set(p.moi, "Jan") + } else { + p.stateTime = timeStart + break iterRunes + } + } + case dateWeekdayAbbrevComma: + // Mon, 02 Jan 2006 15:04:05 MST + // Mon, 02 Jan 2006 15:04:05 -0700 + // Thu, 13 Jul 2017 08:58:40 +0100 + // Thu, 4 Jan 2018 17:53:36 +0000 + // Tue, 11 Jul 2017 16:28:13 +0200 (CEST) + // Mon, 02-Jan-06 15:04:05 MST + switch r { + case ' ', '-': + if p.dayi == 0 { + p.dayi = i + 1 + } else if p.moi == 0 { + p.daylen = i - p.dayi + p.setDay() + p.moi = i + 1 + } else if p.yeari == 0 { + p.molen = i - p.moi + p.set(p.moi, "Jan") + p.yeari = i + 1 + } else { + p.yearlen = i - p.yeari + p.setYear() + p.stateTime = timeStart + break iterRunes + } + } + + default: + break iterRunes + } + } + p.coalesceDate(i) + if p.stateTime == timeStart { + // increment first one, since the i++ occurs at end of loop + if i < len(p.datestr) { + i++ + } + // ensure we skip any whitespace prefix + for ; i < len(datestr); i++ { + r := rune(datestr[i]) + if r != ' ' { + break + } + } + + iterTimeRunes: + for ; i < len(datestr); i++ { + r := rune(datestr[i]) + + //gou.Debugf("%d %s %d iterTimeRunes %s %s", i, string(r), p.stateTime, p.ds(), p.ts()) + + switch p.stateTime { + case timeStart: + // 22:43:22 + // 22:43 + // timeComma + // 08:20:13,787 + // timeWs + // 05:24:37 PM + // 06:20:00 UTC + // 06:20:00 UTC-05 + // 00:12:00 +0000 UTC + // 22:18:00 +0000 UTC m=+0.000000001 + // 15:04:05 -0700 + // 15:04:05 -07:00 + // 15:04:05 2008 + // timeOffset + // 03:21:51+00:00 + // 19:55:00+0100 + // timePeriod + // 17:24:37.3186369 + // 00:07:31.945167 + // 18:31:59.257000000 + // 00:00:00.000 + // timePeriodOffset + // 19:55:00.799+0100 + // timePeriodOffsetColon + // 15:04:05.999-07:00 + // timePeriodWs + // timePeriodWsOffset + // 00:07:31.945167 +0000 + // 00:00:00.000 +0000 + // timePeriodWsOffsetAlpha + // 00:07:31.945167 +0000 UTC + // 22:18:00.001 +0000 UTC m=+0.000000001 + // 00:00:00.000 +0000 UTC + // timePeriodWsAlpha + // 06:20:00.000 UTC + if p.houri == 0 { + p.houri = i + } + switch r { + case ',': + // hm, lets just swap out comma for period. for some reason go + // won't parse it. + // 2014-05-11 08:20:13,787 + ds := []byte(p.datestr) + ds[i] = '.' 
+ return parseTime(string(ds), loc) + case '-', '+': + // 03:21:51+00:00 + p.stateTime = timeOffset + if p.seci == 0 { + // 22:18+0530 + p.minlen = i - p.mini + } else { + p.seclen = i - p.seci + } + p.offseti = i + case '.': + p.stateTime = timePeriod + p.seclen = i - p.seci + p.msi = i + 1 + case 'Z': + p.stateTime = timeZ + if p.seci == 0 { + p.minlen = i - p.mini + } else { + p.seclen = i - p.seci + } + case 'a', 'A': + if p.nextIs(i, 't') || p.nextIs(i, 'T') { + // x + // September 17, 2012 at 5:00pm UTC-05 + i++ // skip t + if p.nextIs(i, ' ') { + // x + // September 17, 2012 at 5:00pm UTC-05 + i++ // skip ' + p.houri = 0 // reset hour + } + } else { + switch { + case r == 'a' && p.nextIs(i, 'm'): + p.coalesceTime(i) + p.set(i, "am") + case r == 'A' && p.nextIs(i, 'M'): + p.coalesceTime(i) + p.set(i, "PM") + } + } + + case 'p', 'P': + // Could be AM/PM + switch { + case r == 'p' && p.nextIs(i, 'm'): + p.coalesceTime(i) + p.set(i, "pm") + case r == 'P' && p.nextIs(i, 'M'): + p.coalesceTime(i) + p.set(i, "PM") + } + case ' ': + p.coalesceTime(i) + p.stateTime = timeWs + case ':': + if p.mini == 0 { + p.mini = i + 1 + p.hourlen = i - p.houri + } else if p.seci == 0 { + p.seci = i + 1 + p.minlen = i - p.mini + } + } + case timeOffset: + // 19:55:00+0100 + // timeOffsetColon + // 15:04:05+07:00 + // 15:04:05-07:00 + if r == ':' { + p.stateTime = timeOffsetColon + } + case timeWs: + // timeWsAlpha + // 06:20:00 UTC + // 06:20:00 UTC-05 + // 15:44:11 UTC+0100 2015 + // 18:04:07 GMT+0100 (GMT Daylight Time) + // 17:57:51 MST 2009 + // timeWsAMPMMaybe + // 05:24:37 PM + // timeWsOffset + // 15:04:05 -0700 + // 00:12:00 +0000 UTC + // timeWsOffsetColon + // 15:04:05 -07:00 + // 17:57:51 -0700 2009 + // timeWsOffsetColonAlpha + // 00:12:00 +00:00 UTC + // timeWsYear + // 00:12:00 2008 + // timeZ + // 15:04:05.99Z + switch r { + case 'A', 'P': + // Could be AM/PM or could be PST or similar + p.tzi = i + p.stateTime = timeWsAMPMMaybe + case '+', '-': + p.offseti = i + p.stateTime = timeWsOffset + default: + if unicode.IsLetter(r) { + // 06:20:00 UTC + // 06:20:00 UTC-05 + // 15:44:11 UTC+0100 2015 + // 17:57:51 MST 2009 + p.tzi = i + p.stateTime = timeWsAlpha + //break iterTimeRunes + } else if unicode.IsDigit(r) { + // 00:12:00 2008 + p.stateTime = timeWsYear + p.yeari = i + } + } + case timeWsAlpha: + // 06:20:00 UTC + // 06:20:00 UTC-05 + // timeWsAlphaWs + // 17:57:51 MST 2009 + // timeWsAlphaZoneOffset + // timeWsAlphaZoneOffsetWs + // timeWsAlphaZoneOffsetWsExtra + // 18:04:07 GMT+0100 (GMT Daylight Time) + // timeWsAlphaZoneOffsetWsYear + // 15:44:11 UTC+0100 2015 + switch r { + case '+', '-': + p.tzlen = i - p.tzi + if p.tzlen == 4 { + p.set(p.tzi, " MST") + } else if p.tzlen == 3 { + p.set(p.tzi, "MST") + } + p.stateTime = timeWsAlphaZoneOffset + p.offseti = i + case ' ': + // 17:57:51 MST 2009 + p.tzlen = i - p.tzi + if p.tzlen == 4 { + p.set(p.tzi, " MST") + } else if p.tzlen == 3 { + p.set(p.tzi, "MST") + } + p.stateTime = timeWsAlphaWs + p.yeari = i + 1 + } + case timeWsAlphaWs: + // 17:57:51 MST 2009 + + case timeWsAlphaZoneOffset: + // 06:20:00 UTC-05 + // timeWsAlphaZoneOffset + // timeWsAlphaZoneOffsetWs + // timeWsAlphaZoneOffsetWsExtra + // 18:04:07 GMT+0100 (GMT Daylight Time) + // timeWsAlphaZoneOffsetWsYear + // 15:44:11 UTC+0100 2015 + switch r { + case ' ': + p.set(p.offseti, "-0700") + p.yeari = i + 1 + p.stateTime = timeWsAlphaZoneOffsetWs + } + case timeWsAlphaZoneOffsetWs: + // timeWsAlphaZoneOffsetWs + // timeWsAlphaZoneOffsetWsExtra + // 18:04:07 GMT+0100 (GMT 
Daylight Time) + // timeWsAlphaZoneOffsetWsYear + // 15:44:11 UTC+0100 2015 + if unicode.IsDigit(r) { + p.stateTime = timeWsAlphaZoneOffsetWsYear + } else { + p.extra = i - 1 + p.stateTime = timeWsAlphaZoneOffsetWsExtra + } + case timeWsAlphaZoneOffsetWsYear: + // 15:44:11 UTC+0100 2015 + if unicode.IsDigit(r) { + p.yearlen = i - p.yeari + 1 + if p.yearlen == 4 { + p.setYear() + } + } + case timeWsAMPMMaybe: + // timeWsAMPMMaybe + // timeWsAMPM + // 05:24:37 PM + // timeWsAlpha + // 00:12:00 PST + // 15:44:11 UTC+0100 2015 + if r == 'M' { + //return parse("2006-01-02 03:04:05 PM", datestr, loc) + p.stateTime = timeWsAMPM + p.set(i-1, "PM") + if p.hourlen == 2 { + p.set(p.houri, "03") + } else if p.hourlen == 1 { + p.set(p.houri, "3") + } + } else { + p.stateTime = timeWsAlpha + } + + case timeWsOffset: + // timeWsOffset + // 15:04:05 -0700 + // timeWsOffsetWsOffset + // 17:57:51 -0700 -07 + // timeWsOffsetWs + // 17:57:51 -0700 2009 + // 00:12:00 +0000 UTC + // timeWsOffsetColon + // 15:04:05 -07:00 + // timeWsOffsetColonAlpha + // 00:12:00 +00:00 UTC + switch r { + case ':': + p.stateTime = timeWsOffsetColon + case ' ': + p.set(p.offseti, "-0700") + p.yeari = i + 1 + p.stateTime = timeWsOffsetWs + } + case timeWsOffsetWs: + // 17:57:51 -0700 2009 + // 00:12:00 +0000 UTC + // 22:18:00.001 +0000 UTC m=+0.000000001 + // w Extra + // 17:57:51 -0700 -07 + switch r { + case '=': + // eff you golang + if datestr[i-1] == 'm' { + p.extra = i - 2 + p.trimExtra() + break + } + case '+', '-': + // This really doesn't seem valid, but for some reason when round-tripping a go date + // their is an extra +03 printed out. seems like go bug to me, but, parsing anyway. + // 00:00:00 +0300 +03 + // 00:00:00 +0300 +0300 + p.extra = i - 1 + p.stateTime = timeWsOffset + p.trimExtra() + break + default: + switch { + case unicode.IsDigit(r): + p.yearlen = i - p.yeari + 1 + if p.yearlen == 4 { + p.setYear() + } + case unicode.IsLetter(r): + if p.tzi == 0 { + p.tzi = i + } + } + } + + case timeWsOffsetColon: + // timeWsOffsetColon + // 15:04:05 -07:00 + // timeWsOffsetColonAlpha + // 2015-02-18 00:12:00 +00:00 UTC + if unicode.IsLetter(r) { + // 2015-02-18 00:12:00 +00:00 UTC + p.stateTime = timeWsOffsetColonAlpha + break iterTimeRunes + } + case timePeriod: + // 15:04:05.999999999+07:00 + // 15:04:05.999999999-07:00 + // 15:04:05.999999+07:00 + // 15:04:05.999999-07:00 + // 15:04:05.999+07:00 + // 15:04:05.999-07:00 + // timePeriod + // 17:24:37.3186369 + // 00:07:31.945167 + // 18:31:59.257000000 + // 00:00:00.000 + // timePeriodOffset + // 19:55:00.799+0100 + // timePeriodOffsetColon + // 15:04:05.999-07:00 + // timePeriodWs + // timePeriodWsOffset + // 00:07:31.945167 +0000 + // 00:00:00.000 +0000 + // With Extra + // 00:00:00.000 +0300 +03 + // timePeriodWsOffsetAlpha + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + // 22:18:00.001 +0000 UTC m=+0.000000001 + // timePeriodWsAlpha + // 06:20:00.000 UTC + switch r { + case ' ': + p.mslen = i - p.msi + p.stateTime = timePeriodWs + case '+', '-': + // This really shouldn't happen + p.mslen = i - p.msi + p.offseti = i + p.stateTime = timePeriodOffset + default: + if unicode.IsLetter(r) { + // 06:20:00.000 UTC + p.mslen = i - p.msi + p.stateTime = timePeriodWsAlpha + } + } + case timePeriodOffset: + // timePeriodOffset + // 19:55:00.799+0100 + // timePeriodOffsetColon + // 15:04:05.999-07:00 + // 13:31:51.999-07:00 MST + if r == ':' { + p.stateTime = timePeriodOffsetColon + } + case timePeriodOffsetColon: + // timePeriodOffset + // timePeriodOffsetColon + 
// 15:04:05.999-07:00 + // 13:31:51.999 -07:00 MST + switch r { + case ' ': + p.set(p.offseti, "-07:00") + p.stateTime = timePeriodOffsetColonWs + p.tzi = i + 1 + } + case timePeriodOffsetColonWs: + // continue + case timePeriodWs: + // timePeriodWs + // timePeriodWsOffset + // 00:07:31.945167 +0000 + // 00:00:00.000 +0000 + // timePeriodWsOffsetAlpha + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + // timePeriodWsOffsetColon + // 13:31:51.999 -07:00 MST + // timePeriodWsAlpha + // 06:20:00.000 UTC + if p.offseti == 0 { + p.offseti = i + } + switch r { + case '+', '-': + p.mslen = i - p.msi - 1 + p.stateTime = timePeriodWsOffset + default: + if unicode.IsLetter(r) { + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + p.stateTime = timePeriodWsOffsetWsAlpha + break iterTimeRunes + } + } + + case timePeriodWsOffset: + // timePeriodWs + // timePeriodWsOffset + // 00:07:31.945167 +0000 + // 00:00:00.000 +0000 + // With Extra + // 00:00:00.000 +0300 +03 + // timePeriodWsOffsetAlpha + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + // 03:02:00.001 +0300 MSK m=+0.000000001 + // timePeriodWsOffsetColon + // 13:31:51.999 -07:00 MST + // timePeriodWsAlpha + // 06:20:00.000 UTC + switch r { + case ':': + p.stateTime = timePeriodWsOffsetColon + case ' ': + p.set(p.offseti, "-0700") + case '+', '-': + // This really doesn't seem valid, but for some reason when round-tripping a go date + // their is an extra +03 printed out. seems like go bug to me, but, parsing anyway. + // 00:00:00.000 +0300 +03 + // 00:00:00.000 +0300 +0300 + p.extra = i - 1 + p.trimExtra() + break + default: + if unicode.IsLetter(r) { + // 00:07:31.945167 +0000 UTC + // 00:00:00.000 +0000 UTC + // 03:02:00.001 +0300 MSK m=+0.000000001 + p.stateTime = timePeriodWsOffsetWsAlpha + } + } + case timePeriodWsOffsetWsAlpha: + // 03:02:00.001 +0300 MSK m=+0.000000001 + // eff you golang + if r == '=' && datestr[i-1] == 'm' { + p.extra = i - 2 + p.trimExtra() + break + } + + case timePeriodWsOffsetColon: + // 13:31:51.999 -07:00 MST + switch r { + case ' ': + p.set(p.offseti, "-07:00") + default: + if unicode.IsLetter(r) { + // 13:31:51.999 -07:00 MST + p.tzi = i + p.stateTime = timePeriodWsOffsetColonAlpha + } + } + case timePeriodWsOffsetColonAlpha: + // continue + case timeZ: + // timeZ + // 15:04:05.99Z + // With a time-zone at end after Z + // 2006-01-02T15:04:05.999999999Z07:00 + // 2006-01-02T15:04:05Z07:00 + // RFC3339 = "2006-01-02T15:04:05Z07:00" + // RFC3339Nano = "2006-01-02T15:04:05.999999999Z07:00" + if unicode.IsDigit(r) { + p.stateTime = timeZDigit + } + + } + } + + switch p.stateTime { + case timeWsAlphaWs: + p.yearlen = i - p.yeari + p.setYear() + case timeWsYear: + p.yearlen = i - p.yeari + p.setYear() + case timeWsAlphaZoneOffsetWsExtra: + p.trimExtra() + case timeWsAlphaZoneOffset: + // 06:20:00 UTC-05 + if i-p.offseti < 4 { + p.set(p.offseti, "-07") + } else { + p.set(p.offseti, "-0700") + } + + case timePeriod: + p.mslen = i - p.msi + case timeOffset: + // 19:55:00+0100 + p.set(p.offseti, "-0700") + case timeWsOffset: + p.set(p.offseti, "-0700") + case timeWsOffsetWs: + // 17:57:51 -0700 2009 + // 00:12:00 +0000 UTC + case timeWsOffsetColon: + // 17:57:51 -07:00 + p.set(p.offseti, "-07:00") + case timeOffsetColon: + // 15:04:05+07:00 + p.set(p.offseti, "-07:00") + case timePeriodOffset: + // 19:55:00.799+0100 + p.set(p.offseti, "-0700") + case timePeriodOffsetColon: + p.set(p.offseti, "-07:00") + case timePeriodWsOffsetColonAlpha: + p.tzlen = i - p.tzi + switch p.tzlen { + case 3: + 
p.set(p.tzi, "MST") + case 4: + p.set(p.tzi, "MST ") + } + case timePeriodWsOffset: + p.set(p.offseti, "-0700") + } + p.coalesceTime(i) + } + + switch p.stateDate { + case dateDigit: + // unixy timestamps ish + // example ct type + // 1499979655583057426 19 nanoseconds + // 1499979795437000 16 micro-seconds + // 20180722105203 14 yyyyMMddhhmmss + // 1499979795437 13 milliseconds + // 1332151919 10 seconds + // 20140601 8 yyyymmdd + // 2014 4 yyyy + t := time.Time{} + if len(datestr) == len("1499979655583057426") { // 19 + // nano-seconds + if nanoSecs, err := strconv.ParseInt(datestr, 10, 64); err == nil { + t = time.Unix(0, nanoSecs) + } + } else if len(datestr) == len("1499979795437000") { // 16 + // micro-seconds + if microSecs, err := strconv.ParseInt(datestr, 10, 64); err == nil { + t = time.Unix(0, microSecs*1000) + } + } else if len(datestr) == len("yyyyMMddhhmmss") { // 14 + // yyyyMMddhhmmss + p.format = []byte("20060102150405") + return p, nil + } else if len(datestr) == len("1332151919000") { // 13 + if miliSecs, err := strconv.ParseInt(datestr, 10, 64); err == nil { + t = time.Unix(0, miliSecs*1000*1000) + } + } else if len(datestr) == len("1332151919") { //10 + if secs, err := strconv.ParseInt(datestr, 10, 64); err == nil { + t = time.Unix(secs, 0) + } + } else if len(datestr) == len("20140601") { + p.format = []byte("20060102") + return p, nil + } else if len(datestr) == len("2014") { + p.format = []byte("2006") + return p, nil + } else if len(datestr) < 4 { + return nil, fmt.Errorf("unrecognized format, too short %v", datestr) + } + if !t.IsZero() { + if loc == nil { + p.t = &t + return p, nil + } + t = t.In(loc) + p.t = &t + return p, nil + } + + case dateYearDash: + // 2006-01 + return p, nil + + case dateYearDashDash: + // 2006-01-02 + // 2006-1-02 + // 2006-1-2 + // 2006-01-2 + return p, nil + + case dateYearDashAlphaDash: + // 2013-Feb-03 + // 2013-Feb-3 + p.daylen = i - p.dayi + p.setDay() + return p, nil + + case dateYearDashDashWs: + // 2013-04-01 + return p, nil + + case dateYearDashDashT: + return p, nil + + case dateDigitDashAlphaDash: + // 13-Feb-03 ambiguous + // 28-Feb-03 ambiguous + // 29-Jun-2016 + length := len(datestr) - (p.moi + p.molen + 1) + if length == 4 { + p.yearlen = 4 + p.set(p.yeari, "2006") + // We now also know that part1 was the day + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + } else if length == 2 { + // We have no idea if this is + // yy-mon-dd OR dd-mon-yy + // + // We are going to ASSUME (bad, bad) that it is dd-mon-yy which is a horible assumption + p.ambiguousMD = true + p.yearlen = 2 + p.set(p.yeari, "06") + // We now also know that part1 was the day + p.dayi = 0 + p.daylen = p.part1Len + p.setDay() + } + + return p, nil + + case dateDigitDot: + // 2014.05 + p.molen = i - p.moi + p.setMonth() + return p, nil + + case dateDigitDotDot: + // 03.31.1981 + // 3.31.2014 + // 3.2.1981 + // 3.2.81 + // 08.21.71 + // 2018.09.30 + return p, nil + + case dateDigitWsMoYear: + // 2 Jan 2018 + // 2 Jan 18 + // 2 Jan 2018 23:59 + // 02 Jan 2018 23:59 + // 12 Feb 2006, 19:17 + return p, nil + + case dateDigitWsMolong: + // 18 January 2018 + // 8 January 2018 + if p.daylen == 2 { + p.format = []byte("02 January 2006") + return p, nil + } + p.format = []byte("2 January 2006") + return p, nil // parse("2 January 2006", datestr, loc) + + case dateAlphaWsMonth: + p.yearlen = i - p.yeari + p.setYear() + return p, nil + + case dateAlphaWsMonthMore: + return p, nil + + case dateAlphaWsDigitMoreWs: + // oct 1, 1970 + p.yearlen = i - p.yeari + p.setYear() 
+ return p, nil + + case dateAlphaWsDigitMoreWsYear: + // May 8, 2009 5:57:51 PM + // Jun 7, 2005, 05:57:51 + return p, nil + + case dateAlphaWsAlpha: + return p, nil + + case dateAlphaWsAlphaYearmaybe: + return p, nil + + case dateDigitSlash: + // 3/1/2014 + // 10/13/2014 + // 01/02/2006 + // 2014/10/13 + return p, nil + + case dateDigitChineseYear: + // dateDigitChineseYear + // 2014年04月08日 + p.format = []byte("2006年01月02日") + return p, nil + + case dateDigitChineseYearWs: + p.format = []byte("2006年01月02日 15:04:05") + return p, nil + + case dateWeekdayComma: + // Monday, 02 Jan 2006 15:04:05 -0700 + // Monday, 02 Jan 2006 15:04:05 +0100 + // Monday, 02-Jan-06 15:04:05 MST + return p, nil + + case dateWeekdayAbbrevComma: + // Mon, 02-Jan-06 15:04:05 MST + // Mon, 02 Jan 2006 15:04:05 MST + return p, nil + + } + + return nil, unknownErr(datestr) +} + +type parser struct { + loc *time.Location + preferMonthFirst bool + ambiguousMD bool + stateDate dateState + stateTime timeState + format []byte + datestr string + fullMonth string + skip int + extra int + part1Len int + yeari int + yearlen int + moi int + molen int + dayi int + daylen int + houri int + hourlen int + mini int + minlen int + seci int + seclen int + msi int + mslen int + offseti int + offsetlen int + tzi int + tzlen int + t *time.Time +} + +func newParser(dateStr string, loc *time.Location) *parser { + p := parser{ + stateDate: dateStart, + stateTime: timeIgnore, + datestr: dateStr, + loc: loc, + preferMonthFirst: true, + } + p.format = []byte(dateStr) + return &p +} + +func (p *parser) nextIs(i int, b byte) bool { + if len(p.datestr) > i+1 && p.datestr[i+1] == b { + return true + } + return false +} + +func (p *parser) set(start int, val string) { + if start < 0 { + return + } + if len(p.format) < start+len(val) { + return + } + for i, r := range val { + p.format[start+i] = byte(r) + } +} +func (p *parser) setMonth() { + if p.molen == 2 { + p.set(p.moi, "01") + } else if p.molen == 1 { + p.set(p.moi, "1") + } +} + +func (p *parser) setDay() { + if p.daylen == 2 { + p.set(p.dayi, "02") + } else if p.daylen == 1 { + p.set(p.dayi, "2") + } +} +func (p *parser) setYear() { + if p.yearlen == 2 { + p.set(p.yeari, "06") + } else if p.yearlen == 4 { + p.set(p.yeari, "2006") + } +} +func (p *parser) coalesceDate(end int) { + if p.yeari > 0 { + if p.yearlen == 0 { + p.yearlen = end - p.yeari + } + p.setYear() + } + if p.moi > 0 && p.molen == 0 { + p.molen = end - p.moi + p.setMonth() + } + if p.dayi > 0 && p.daylen == 0 { + p.daylen = end - p.dayi + p.setDay() + } +} +func (p *parser) ts() string { + return fmt.Sprintf("h:(%d:%d) m:(%d:%d) s:(%d:%d)", p.houri, p.hourlen, p.mini, p.minlen, p.seci, p.seclen) +} +func (p *parser) ds() string { + return fmt.Sprintf("%s d:(%d:%d) m:(%d:%d) y:(%d:%d)", p.datestr, p.dayi, p.daylen, p.moi, p.molen, p.yeari, p.yearlen) +} +func (p *parser) coalesceTime(end int) { + // 03:04:05 + // 15:04:05 + // 3:04:05 + // 3:4:5 + // 15:04:05.00 + if p.houri > 0 { + if p.hourlen == 2 { + p.set(p.houri, "15") + } else if p.hourlen == 1 { + p.set(p.houri, "3") + } + } + if p.mini > 0 { + if p.minlen == 0 { + p.minlen = end - p.mini + } + if p.minlen == 2 { + p.set(p.mini, "04") + } else { + p.set(p.mini, "4") + } + } + if p.seci > 0 { + if p.seclen == 0 { + p.seclen = end - p.seci + } + if p.seclen == 2 { + p.set(p.seci, "05") + } else { + p.set(p.seci, "5") + } + } + + if p.msi > 0 { + for i := 0; i < p.mslen; i++ { + p.format[p.msi+i] = '0' + } + } +} +func (p *parser) setFullMonth(month string) { + if p.moi 
== 0 { + p.format = []byte(fmt.Sprintf("%s%s", "January", p.format[len(month):])) + } +} + +func (p *parser) trimExtra() { + if p.extra > 0 && len(p.format) > p.extra { + p.format = p.format[0:p.extra] + p.datestr = p.datestr[0:p.extra] + } +} + +// func (p *parser) remove(i, length int) { +// if len(p.format) > i+length { +// //append(a[:i], a[j:]...) +// p.format = append(p.format[0:i], p.format[i+length:]...) +// } +// if len(p.datestr) > i+length { +// //append(a[:i], a[j:]...) +// p.datestr = fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+length:]) +// } +// } + +func (p *parser) parse() (time.Time, error) { + if p.t != nil { + return *p.t, nil + } + if len(p.fullMonth) > 0 { + p.setFullMonth(p.fullMonth) + } + if p.skip > 0 && len(p.format) > p.skip { + p.format = p.format[p.skip:] + p.datestr = p.datestr[p.skip:] + } + //gou.Debugf("parse %q AS %q", p.datestr, string(p.format)) + if p.loc == nil { + return time.Parse(string(p.format), p.datestr) + } + return time.ParseInLocation(string(p.format), p.datestr, p.loc) +} +func isMonthFull(alpha string) bool { + for _, month := range months { + if alpha == month { + return true + } + } + return false +} diff --git a/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/.travis.yml b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/.travis.yml new file mode 100644 index 000000000..01c5dc219 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.9 + - "1.10" + - 1.11 + - 1.12 + +script: + - go test diff --git a/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/LICENSE b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. 
any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/README.md b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 000000000..6f3a15ce7 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,65 @@ +# Versioning Library for Go +[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/constraint.go b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 000000000..d05575961 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,204 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + check *Version + original string +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. 
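A comma-separated constraint string parses into this slice type, and its Check method (a few lines below) treats the slice as a conjunction: every parsed constraint must pass. A minimal sketch of that behaviour, using the vendored import path with made-up sample versions:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// Check ANDs every comma-separated constraint together.
	constraints, err := version.NewConstraint(">= 1.2, < 2.0")
	if err != nil {
		panic(err)
	}

	for _, raw := range []string{"1.1.0", "1.4.5", "2.0.0"} {
		v := version.Must(version.NewVersion(raw))
		fmt.Printf("%-6s satisfies %s: %v\n", raw, constraints, constraints.Check(v))
	}
	// Expected: false, true, false — only 1.4.5 meets both constraints.
}
```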
+type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintFunc + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintFunc{ + "": constraintEqual, + "=": constraintEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "~>": constraintPessimistic, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + return &Constraint{ + f: constraintOperators[matches[1]], + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/version.go b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 000000000..1032c5606 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,380 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. 
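VersionRegexpRaw is exported so callers can embed it in larger patterns, which is what constraint.go's init does when it builds its operator-plus-version matcher. A small sketch of the same idea, anchoring the raw pattern directly (the sample inputs are illustrative):

```go
package main

import (
	"fmt"
	"regexp"

	version "github.com/hashicorp/go-version"
)

func main() {
	// Anchor the exported raw pattern, mirroring how version.go builds its own versionRegexp.
	re := regexp.MustCompile("^" + version.VersionRegexpRaw + "$")

	for _, s := range []string{"v1.2.3", "1.2.3-beta+build.7", "not-a-version"} {
		fmt.Printf("%-20s matches: %v\n", s, re.MatchString(s))
	}
}
```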
+const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + si := 0 + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = int64(val) + si++ + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: si, + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
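A hedged illustration of the ordering Compare implements when the numeric segments are equal and the decision falls to the pre-release tag (values below are invented):

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	beta := version.Must(version.NewVersion("1.2.3-beta"))
	final := version.Must(version.NewVersion("1.2.3"))

	fmt.Println(beta.Compare(final))            // -1: same segments, the pre-release sorts first
	fmt.Println(beta.LessThan(final))           // true
	fmt.Println(final.GreaterThanOrEqual(beta)) // true
}
```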
+func (v *Version) Compare(other *Version) int { + // A quick, efficient equality check + if v.String() == other.String() { + return 0 + } + + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + // If the segments are the same, we must compare on prerelease info + if reflect.DeepEqual(segmentsSelf, segmentsOther) { + preSelf := v.Prerelease() + preOther := other.Prerelease() + if preSelf == "" && preOther == "" { + return 0 + } + if preSelf == "" { + return 1 + } + if preOther == "" { + return -1 + } + + return comparePrereleases(preSelf, preOther) + } + + // Get the highest specificity (hS), or if they're equal, just use segmentSelf length + lenSelf := len(segmentsSelf) + lenOther := len(segmentsOther) + hS := lenSelf + if lenSelf < lenOther { + hS = lenOther + } + // Compare the segments + // Because a constraint could have more/less specificity than the version it's + // checking, we need to account for a lopsided or jagged comparison + for i := 0; i < hS; i++ { + if i > lenSelf-1 { + // This means Self had the lower specificity + // Check to see if the remaining segments in Other are all zeros + if !allZero(segmentsOther[i:]) { + // if not, it means that Other has to be greater than Self + return -1 + } + break + } else if i > lenOther-1 { + // this means Other had the lower specificity + // Check to see if the remaining segments in Self are all zeros - + if !allZero(segmentsSelf[i:]) { + //if not, it means that Self has to be greater than Other + return 1 + } + break + } + lhs := segmentsSelf[i] + rhs := segmentsOther[i] + if lhs == rhs { + continue + } else if lhs < rhs { + return -1 + } + // Otherwis, rhs was > lhs, they're not equal + return 1 + } + + // if we got this far, they're equal + return 0 +} + +func allZero(segs []int64) bool { + for _, s := range segs { + if s != 0 { + return false + } + } + return true +} + +func comparePart(preSelf string, preOther string) int { + if preSelf == preOther { + return 0 + } + + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + + // if a part is empty, we use the other to decide + if preSelf == "" { + if otherNumeric { + return -1 + } + return 1 + } + + if preOther == "" { + if selfNumeric { + return 1 + } + return -1 + } + + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { + return 1 + } + + return -1 +} + +func comparePrereleases(v string, other string) int { + // the same pre release! 
+ if v == other { + return 0 + } + + // split both pre releases for analyse their parts + selfPreReleaseMeta := strings.Split(v, ".") + otherPreReleaseMeta := strings.Split(other, ".") + + selfPreReleaseLen := len(selfPreReleaseMeta) + otherPreReleaseLen := len(otherPreReleaseMeta) + + biggestLen := otherPreReleaseLen + if selfPreReleaseLen > otherPreReleaseLen { + biggestLen = selfPreReleaseLen + } + + // loop for parts to find the first difference + for i := 0; i < biggestLen; i = i + 1 { + partSelfPre := "" + if i < selfPreReleaseLen { + partSelfPre = selfPreReleaseMeta[i] + } + + partOtherPre := "" + if i < otherPreReleaseLen { + partOtherPre = otherPreReleaseMeta[i] + } + + compare := comparePart(partSelfPre, partOtherPre) + // if parts are equals, continue the loop + if compare != 0 { + return compare + } + } + + return 0 +} + +// Equal tests if two versions are equal. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// GreaterThan tests if this version is greater than another version. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// GreaterThanOrEqualTo tests if this version is greater than or equal to another version. +func (v *Version) GreaterThanOrEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + +// LessThan tests if this version is less than another version. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// LessThanOrEqualTo tests if this version is less than or equal to another version. +func (v *Version) LessThanOrEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + +// Metadata returns any metadata that was part of the version +// string. +// +// Metadata is anything that comes after the "+" in the version. +// For example, with "1.2.3+beta", the metadata is "beta". +func (v *Version) Metadata() string { + return v.metadata +} + +// Prerelease returns any prerelease data that is part of the version, +// or blank if there is no prerelease data. +// +// Prerelease information is anything that comes after the "-" in the +// version (but before any metadata). For example, with "1.2.3-beta", +// the prerelease information is "beta". +func (v *Version) Prerelease() string { + return v.pre +} + +// Segments returns the numeric segments of the version as a slice of ints. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments() []int { + segmentSlice := make([]int, len(v.segments)) + for i, v := range v.segments { + segmentSlice[i] = int(v) + } + return segmentSlice +} + +// Segments64 returns the numeric segments of the version as a slice of int64s. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments64() []int64 { + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result +} + +// String returns the full version string included pre-release +// and metadata information. +// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. 
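A short sketch of the canonicalization described above; only String and Original come from the package, the input value is illustrative:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	v := version.Must(version.NewVersion("v1.04"))

	fmt.Println(v.String())   // "1.4.0": "v" prefix dropped, zero-padded segment normalized, missing segment filled in
	fmt.Println(v.Original()) // "v1.04": the input exactly as it was parsed
}
```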
+func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} diff --git a/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/version_collection.go b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 000000000..cc888d43e --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/scripts/token-log-collector/vendor/github.com/kr/pretty/.gitignore b/scripts/token-log-collector/vendor/github.com/kr/pretty/.gitignore new file mode 100644 index 000000000..1f0a99f2f --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/pretty/.gitignore @@ -0,0 +1,4 @@ +[568].out +_go* +_test* +_obj diff --git a/scripts/token-log-collector/vendor/github.com/kr/pretty/License b/scripts/token-log-collector/vendor/github.com/kr/pretty/License new file mode 100644 index 000000000..480a32805 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/pretty/License @@ -0,0 +1,19 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/scripts/token-log-collector/vendor/github.com/kr/pretty/Readme b/scripts/token-log-collector/vendor/github.com/kr/pretty/Readme new file mode 100644 index 000000000..c589fc622 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/pretty/Readme @@ -0,0 +1,9 @@ +package pretty + + import "github.com/kr/pretty" + + Package pretty provides pretty-printing for Go values. 
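The Readme stops short of a usage example; a minimal sketch of Diff, one of the package's two main entry points (the Server struct and its values are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/kr/pretty"
)

type Server struct {
	Host string
	Port int
}

func main() {
	a := Server{Host: "vcd.example.com", Port: 443}
	b := Server{Host: "vcd.example.com", Port: 8443}

	// Diff returns one human-readable string per differing field.
	fmt.Println(pretty.Diff(a, b)) // e.g. [Port: 443 != 8443]
}
```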
+ +Documentation + + http://godoc.org/github.com/kr/pretty diff --git a/scripts/token-log-collector/vendor/github.com/kr/pretty/diff.go b/scripts/token-log-collector/vendor/github.com/kr/pretty/diff.go new file mode 100644 index 000000000..6aa7f743a --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/pretty/diff.go @@ -0,0 +1,265 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" +) + +type sbuf []string + +func (p *sbuf) Printf(format string, a ...interface{}) { + s := fmt.Sprintf(format, a...) + *p = append(*p, s) +} + +// Diff returns a slice where each element describes +// a difference between a and b. +func Diff(a, b interface{}) (desc []string) { + Pdiff((*sbuf)(&desc), a, b) + return desc +} + +// wprintfer calls Fprintf on w for each Printf call +// with a trailing newline. +type wprintfer struct{ w io.Writer } + +func (p *wprintfer) Printf(format string, a ...interface{}) { + fmt.Fprintf(p.w, format+"\n", a...) +} + +// Fdiff writes to w a description of the differences between a and b. +func Fdiff(w io.Writer, a, b interface{}) { + Pdiff(&wprintfer{w}, a, b) +} + +type Printfer interface { + Printf(format string, a ...interface{}) +} + +// Pdiff prints to p a description of the differences between a and b. +// It calls Printf once for each difference, with no trailing newline. +// The standard library log.Logger is a Printfer. +func Pdiff(p Printfer, a, b interface{}) { + diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b)) +} + +type Logfer interface { + Logf(format string, a ...interface{}) +} + +// logprintfer calls Fprintf on w for each Printf call +// with a trailing newline. +type logprintfer struct{ l Logfer } + +func (p *logprintfer) Printf(format string, a ...interface{}) { + p.l.Logf(format, a...) +} + +// Ldiff prints to l a description of the differences between a and b. +// It calls Logf once for each difference, with no trailing newline. +// The standard library testing.T and testing.B are Logfers. +func Ldiff(l Logfer, a, b interface{}) { + Pdiff(&logprintfer{l}, a, b) +} + +type diffPrinter struct { + w Printfer + l string // label +} + +func (w diffPrinter) printf(f string, a ...interface{}) { + var l string + if w.l != "" { + l = w.l + ": " + } + w.w.Printf(l+f, a...) 
+} + +func (w diffPrinter) diff(av, bv reflect.Value) { + if !av.IsValid() && bv.IsValid() { + w.printf("nil != %# v", formatter{v: bv, quote: true}) + return + } + if av.IsValid() && !bv.IsValid() { + w.printf("%# v != nil", formatter{v: av, quote: true}) + return + } + if !av.IsValid() && !bv.IsValid() { + return + } + + at := av.Type() + bt := bv.Type() + if at != bt { + w.printf("%v != %v", at, bt) + return + } + + switch kind := at.Kind(); kind { + case reflect.Bool: + if a, b := av.Bool(), bv.Bool(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if a, b := av.Int(), bv.Int(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + if a, b := av.Uint(), bv.Uint(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Float32, reflect.Float64: + if a, b := av.Float(), bv.Float(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Complex64, reflect.Complex128: + if a, b := av.Complex(), bv.Complex(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Array: + n := av.Len() + for i := 0; i < n; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + if a, b := av.Pointer(), bv.Pointer(); a != b { + w.printf("%#x != %#x", a, b) + } + case reflect.Interface: + w.diff(av.Elem(), bv.Elem()) + case reflect.Map: + ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys()) + for _, k := range ak { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("%q != (missing)", av.MapIndex(k)) + } + for _, k := range both { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.diff(av.MapIndex(k), bv.MapIndex(k)) + } + for _, k := range bk { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("(missing) != %q", bv.MapIndex(k)) + } + case reflect.Ptr: + switch { + case av.IsNil() && !bv.IsNil(): + w.printf("nil != %# v", formatter{v: bv, quote: true}) + case !av.IsNil() && bv.IsNil(): + w.printf("%# v != nil", formatter{v: av, quote: true}) + case !av.IsNil() && !bv.IsNil(): + w.diff(av.Elem(), bv.Elem()) + } + case reflect.Slice: + lenA := av.Len() + lenB := bv.Len() + if lenA != lenB { + w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB) + break + } + for i := 0; i < lenA; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.String: + if a, b := av.String(), bv.String(); a != b { + w.printf("%q != %q", a, b) + } + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i)) + } + default: + panic("unknown reflect Kind: " + kind.String()) + } +} + +func (d diffPrinter) relabel(name string) (d1 diffPrinter) { + d1 = d + if d.l != "" && name[0] != '[' { + d1.l += "." + } + d1.l += name + return d1 +} + +// keyEqual compares a and b for equality. +// Both a and b must be valid map keys. 
+func keyEqual(av, bv reflect.Value) bool { + if !av.IsValid() && !bv.IsValid() { + return true + } + if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() { + return false + } + switch kind := av.Kind(); kind { + case reflect.Bool: + a, b := av.Bool(), bv.Bool() + return a == b + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + a, b := av.Int(), bv.Int() + return a == b + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + a, b := av.Uint(), bv.Uint() + return a == b + case reflect.Float32, reflect.Float64: + a, b := av.Float(), bv.Float() + return a == b + case reflect.Complex64, reflect.Complex128: + a, b := av.Complex(), bv.Complex() + return a == b + case reflect.Array: + for i := 0; i < av.Len(); i++ { + if !keyEqual(av.Index(i), bv.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.UnsafePointer, reflect.Ptr: + a, b := av.Pointer(), bv.Pointer() + return a == b + case reflect.Interface: + return keyEqual(av.Elem(), bv.Elem()) + case reflect.String: + a, b := av.String(), bv.String() + return a == b + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + if !keyEqual(av.Field(i), bv.Field(i)) { + return false + } + } + return true + default: + panic("invalid map key type " + av.Type().String()) + } +} + +func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) { + for _, av := range a { + inBoth := false + for _, bv := range b { + if keyEqual(av, bv) { + inBoth = true + both = append(both, av) + break + } + } + if !inBoth { + ak = append(ak, av) + } + } + for _, bv := range b { + inBoth := false + for _, av := range a { + if keyEqual(av, bv) { + inBoth = true + break + } + } + if !inBoth { + bk = append(bk, bv) + } + } + return +} diff --git a/scripts/token-log-collector/vendor/github.com/kr/pretty/formatter.go b/scripts/token-log-collector/vendor/github.com/kr/pretty/formatter.go new file mode 100644 index 000000000..bf4b598d0 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/pretty/formatter.go @@ -0,0 +1,327 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" + "strconv" + "text/tabwriter" + + "github.com/kr/text" +) + +type formatter struct { + v reflect.Value + force bool + quote bool +} + +// Formatter makes a wrapper, f, that will format x as go source with line +// breaks and tabs. Object f responds to the "%v" formatting verb when both the +// "#" and " " (space) flags are set, for example: +// +// fmt.Sprintf("%# v", Formatter(x)) +// +// If one of these two flags is not set, or any other verb is used, f will +// format x according to the usual rules of package fmt. +// In particular, if x satisfies fmt.Formatter, then x.Format will be called. 
+func Formatter(x interface{}) (f fmt.Formatter) { + return formatter{v: reflect.ValueOf(x), quote: true} +} + +func (fo formatter) String() string { + return fmt.Sprint(fo.v.Interface()) // unwrap it +} + +func (fo formatter) passThrough(f fmt.State, c rune) { + s := "%" + for i := 0; i < 128; i++ { + if f.Flag(i) { + s += string(rune(i)) + } + } + if w, ok := f.Width(); ok { + s += fmt.Sprintf("%d", w) + } + if p, ok := f.Precision(); ok { + s += fmt.Sprintf(".%d", p) + } + s += string(c) + fmt.Fprintf(f, s, fo.v.Interface()) +} + +func (fo formatter) Format(f fmt.State, c rune) { + if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') { + w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0) + p := &printer{tw: w, Writer: w, visited: make(map[visit]int)} + p.printValue(fo.v, true, fo.quote) + w.Flush() + return + } + fo.passThrough(f, c) +} + +type printer struct { + io.Writer + tw *tabwriter.Writer + visited map[visit]int + depth int +} + +func (p *printer) indent() *printer { + q := *p + q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0) + q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'}) + return &q +} + +func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) { + if showType { + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, "(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } +} + +// printValue must keep track of already-printed pointer values to avoid +// infinite recursion. +type visit struct { + v uintptr + typ reflect.Type +} + +func (p *printer) printValue(v reflect.Value, showType, quote bool) { + if p.depth > 10 { + io.WriteString(p, "!%v(DEPTH EXCEEDED)") + return + } + + switch v.Kind() { + case reflect.Bool: + p.printInline(v, v.Bool(), showType) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.printInline(v, v.Int(), showType) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.printInline(v, v.Uint(), showType) + case reflect.Float32, reflect.Float64: + p.printInline(v, v.Float(), showType) + case reflect.Complex64, reflect.Complex128: + fmt.Fprintf(p, "%#v", v.Complex()) + case reflect.String: + p.fmtString(v.String(), quote) + case reflect.Map: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + keys := v.MapKeys() + for i := 0; i < v.Len(); i++ { + k := keys[i] + mv := v.MapIndex(k) + pp.printValue(k, false, true) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') + } + showTypeInStruct := t.Elem().Kind() == reflect.Interface + pp.printValue(mv, showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Struct: + t := v.Type() + if v.CanAddr() { + addr := v.UnsafeAddr() + vis := visit{addr, t} + if vd, ok := p.visited[vis]; ok && vd < p.depth { + p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false) + break // don't print v again + } + p.visited[vis] = p.depth + } + + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.NumField(); i++ { + showTypeInStruct := true + if f := t.Field(i); f.Name != "" { + io.WriteString(pp, f.Name) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') + } + 
showTypeInStruct = labelType(f.Type) + } + pp.printValue(getField(v, i), showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.NumField()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Interface: + switch e := v.Elem(); { + case e.Kind() == reflect.Invalid: + io.WriteString(p, "nil") + case e.IsValid(): + pp := *p + pp.depth++ + pp.printValue(e, showType, true) + default: + io.WriteString(p, v.Type().String()) + io.WriteString(p, "(nil)") + } + case reflect.Array, reflect.Slice: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + if v.Kind() == reflect.Slice && v.IsNil() && showType { + io.WriteString(p, "(nil)") + break + } + if v.Kind() == reflect.Slice && v.IsNil() { + io.WriteString(p, "nil") + break + } + writeByte(p, '{') + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.Len(); i++ { + showTypeInSlice := t.Elem().Kind() == reflect.Interface + pp.printValue(v.Index(i), showTypeInSlice, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + writeByte(p, '}') + case reflect.Ptr: + e := v.Elem() + if !e.IsValid() { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + io.WriteString(p, ")(nil)") + } else { + pp := *p + pp.depth++ + writeByte(pp, '&') + pp.printValue(e, true, true) + } + case reflect.Chan: + x := v.Pointer() + if showType { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, ")(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } + case reflect.Func: + io.WriteString(p, v.Type().String()) + io.WriteString(p, " {...}") + case reflect.UnsafePointer: + p.printInline(v, v.Pointer(), showType) + case reflect.Invalid: + io.WriteString(p, "nil") + } +} + +func canInline(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map: + return !canExpand(t.Elem()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if canExpand(t.Field(i).Type) { + return false + } + } + return true + case reflect.Interface: + return false + case reflect.Array, reflect.Slice: + return !canExpand(t.Elem()) + case reflect.Ptr: + return false + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + return false + } + return true +} + +func canExpand(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map, reflect.Struct, + reflect.Interface, reflect.Array, reflect.Slice, + reflect.Ptr: + return true + } + return false +} + +func labelType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Interface, reflect.Struct: + return true + } + return false +} + +func (p *printer) fmtString(s string, quote bool) { + if quote { + s = strconv.Quote(s) + } + io.WriteString(p, s) +} + +func writeByte(w io.Writer, b byte) { + w.Write([]byte{b}) +} + +func getField(v reflect.Value, i int) reflect.Value { + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + return val +} diff --git a/scripts/token-log-collector/vendor/github.com/kr/pretty/pretty.go b/scripts/token-log-collector/vendor/github.com/kr/pretty/pretty.go new file mode 100644 index 000000000..b4ca583c0 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/pretty/pretty.go @@ -0,0 +1,108 @@ +// Package pretty provides pretty-printing for Go values. This is +// useful during debugging, to avoid wrapping long output lines in +// the terminal. 
+// +// It provides a function, Formatter, that can be used with any +// function that accepts a format string. It also provides +// convenience wrappers for functions in packages fmt and log. +package pretty + +import ( + "fmt" + "io" + "log" + "reflect" +) + +// Errorf is a convenience wrapper for fmt.Errorf. +// +// Calling Errorf(f, x, y) is equivalent to +// fmt.Errorf(f, Formatter(x), Formatter(y)). +func Errorf(format string, a ...interface{}) error { + return fmt.Errorf(format, wrap(a, false)...) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf. +// +// Calling Fprintf(w, f, x, y) is equivalent to +// fmt.Fprintf(w, f, Formatter(x), Formatter(y)). +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) { + return fmt.Fprintf(w, format, wrap(a, false)...) +} + +// Log is a convenience wrapper for log.Printf. +// +// Calling Log(x, y) is equivalent to +// log.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Log(a ...interface{}) { + log.Print(wrap(a, true)...) +} + +// Logf is a convenience wrapper for log.Printf. +// +// Calling Logf(f, x, y) is equivalent to +// log.Printf(f, Formatter(x), Formatter(y)). +func Logf(format string, a ...interface{}) { + log.Printf(format, wrap(a, false)...) +} + +// Logln is a convenience wrapper for log.Printf. +// +// Calling Logln(x, y) is equivalent to +// log.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Logln(a ...interface{}) { + log.Println(wrap(a, true)...) +} + +// Print pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Print(a ...interface{}) (n int, errno error) { + return fmt.Print(wrap(a, true)...) +} + +// Printf is a convenience wrapper for fmt.Printf. +// +// Calling Printf(f, x, y) is equivalent to +// fmt.Printf(f, Formatter(x), Formatter(y)). +func Printf(format string, a ...interface{}) (n int, errno error) { + return fmt.Printf(format, wrap(a, false)...) +} + +// Println pretty-prints its operands and writes to standard output. +// +// Calling Println(x, y) is equivalent to +// fmt.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Println(a ...interface{}) (n int, errno error) { + return fmt.Println(wrap(a, true)...) +} + +// Sprint is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprint(x, y) is equivalent to +// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Sprint(a ...interface{}) string { + return fmt.Sprint(wrap(a, true)...) +} + +// Sprintf is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprintf(f, x, y) is equivalent to +// fmt.Sprintf(f, Formatter(x), Formatter(y)). +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, wrap(a, false)...) 
+} + +func wrap(a []interface{}, force bool) []interface{} { + w := make([]interface{}, len(a)) + for i, x := range a { + w[i] = formatter{v: reflect.ValueOf(x), force: force} + } + return w +} diff --git a/scripts/token-log-collector/vendor/github.com/kr/pretty/zero.go b/scripts/token-log-collector/vendor/github.com/kr/pretty/zero.go new file mode 100644 index 000000000..abb5b6fc1 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/pretty/zero.go @@ -0,0 +1,41 @@ +package pretty + +import ( + "reflect" +) + +func nonzero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() != 0 + case reflect.Float32, reflect.Float64: + return v.Float() != 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() != complex(0, 0) + case reflect.String: + return v.String() != "" + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if nonzero(getField(v, i)) { + return true + } + } + return false + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if nonzero(v.Index(i)) { + return true + } + } + return false + case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func: + return !v.IsNil() + case reflect.UnsafePointer: + return v.Pointer() != 0 + } + return true +} diff --git a/scripts/token-log-collector/vendor/github.com/kr/text/License b/scripts/token-log-collector/vendor/github.com/kr/text/License new file mode 100644 index 000000000..480a32805 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/text/License @@ -0,0 +1,19 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/scripts/token-log-collector/vendor/github.com/kr/text/Readme b/scripts/token-log-collector/vendor/github.com/kr/text/Readme new file mode 100644 index 000000000..7e6e7c068 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/text/Readme @@ -0,0 +1,3 @@ +This is a Go package for manipulating paragraphs of text. + +See http://go.pkgdoc.org/github.com/kr/text for full documentation. 
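
The kr/pretty files vendored above (diff.go, formatter.go, pretty.go, zero.go) are a transitive dependency pulled in for debug output; their main entry points are `Formatter` (used with the `"%# v"` verb) and `Diff`. The following is a minimal usage sketch, not code from this repository; the `Config` struct and its values are purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/kr/pretty"
)

// Config is an illustrative struct, not part of token-log-collector.
type Config struct {
	Host    string
	Retries int
	Tags    []string
}

func main() {
	a := Config{Host: "vcd.example.com", Retries: 3, Tags: []string{"debug"}}
	b := Config{Host: "vcd.example.com", Retries: 5, Tags: []string{"debug", "token"}}

	// Formatter pretty-prints a value with line breaks and tabs when
	// formatted with both the '#' and ' ' flags, i.e. the "%# v" verb.
	fmt.Printf("%# v\n", pretty.Formatter(a))

	// Diff returns one string per differing field, e.g. "Retries: 3 != 5".
	for _, d := range pretty.Diff(a, b) {
		fmt.Println(d)
	}
}
```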
diff --git a/scripts/token-log-collector/vendor/github.com/kr/text/doc.go b/scripts/token-log-collector/vendor/github.com/kr/text/doc.go new file mode 100644 index 000000000..cf4c198f9 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/text/doc.go @@ -0,0 +1,3 @@ +// Package text provides rudimentary functions for manipulating text in +// paragraphs. +package text diff --git a/scripts/token-log-collector/vendor/github.com/kr/text/indent.go b/scripts/token-log-collector/vendor/github.com/kr/text/indent.go new file mode 100644 index 000000000..4ebac45c0 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/text/indent.go @@ -0,0 +1,74 @@ +package text + +import ( + "io" +) + +// Indent inserts prefix at the beginning of each non-empty line of s. The +// end-of-line marker is NL. +func Indent(s, prefix string) string { + return string(IndentBytes([]byte(s), []byte(prefix))) +} + +// IndentBytes inserts prefix at the beginning of each non-empty line of b. +// The end-of-line marker is NL. +func IndentBytes(b, prefix []byte) []byte { + var res []byte + bol := true + for _, c := range b { + if bol && c != '\n' { + res = append(res, prefix...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// Writer indents each line of its input. +type indentWriter struct { + w io.Writer + bol bool + pre [][]byte + sel int + off int +} + +// NewIndentWriter makes a new write filter that indents the input +// lines. Each line is prefixed in order with the corresponding +// element of pre. If there are more lines than elements, the last +// element of pre is repeated for each subsequent line. +func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer { + return &indentWriter{ + w: w, + pre: pre, + bol: true, + } +} + +// The only errors returned are from the underlying indentWriter. +func (w *indentWriter) Write(p []byte) (n int, err error) { + for _, c := range p { + if w.bol { + var i int + i, err = w.w.Write(w.pre[w.sel][w.off:]) + w.off += i + if err != nil { + return n, err + } + } + _, err = w.w.Write([]byte{c}) + if err != nil { + return n, err + } + n++ + w.bol = c == '\n' + if w.bol { + w.off = 0 + if w.sel < len(w.pre)-1 { + w.sel++ + } + } + } + return n, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/kr/text/wrap.go b/scripts/token-log-collector/vendor/github.com/kr/text/wrap.go new file mode 100644 index 000000000..b09bb0373 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/kr/text/wrap.go @@ -0,0 +1,86 @@ +package text + +import ( + "bytes" + "math" +) + +var ( + nl = []byte{'\n'} + sp = []byte{' '} +) + +const defaultPenalty = 1e5 + +// Wrap wraps s into a paragraph of lines of length lim, with minimal +// raggedness. +func Wrap(s string, lim int) string { + return string(WrapBytes([]byte(s), lim)) +} + +// WrapBytes wraps b into a paragraph of lines of length lim, with minimal +// raggedness. +func WrapBytes(b []byte, lim int) []byte { + words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp) + var lines [][]byte + for _, line := range WrapWords(words, 1, lim, defaultPenalty) { + lines = append(lines, bytes.Join(line, sp)) + } + return bytes.Join(lines, nl) +} + +// WrapWords is the low-level line-breaking algorithm, useful if you need more +// control over the details of the text wrapping process. For most uses, either +// Wrap or WrapBytes will be sufficient and more convenient. 
+// +// WrapWords splits a list of words into lines with minimal "raggedness", +// treating each byte as one unit, accounting for spc units between adjacent +// words on each line, and attempting to limit lines to lim units. Raggedness +// is the total error over all lines, where error is the square of the +// difference of the length of the line and lim. Too-long lines (which only +// happen when a single word is longer than lim units) have pen penalty units +// added to the error. +func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte { + n := len(words) + + length := make([][]int, n) + for i := 0; i < n; i++ { + length[i] = make([]int, n) + length[i][i] = len(words[i]) + for j := i + 1; j < n; j++ { + length[i][j] = length[i][j-1] + spc + len(words[j]) + } + } + + nbrk := make([]int, n) + cost := make([]int, n) + for i := range cost { + cost[i] = math.MaxInt32 + } + for i := n - 1; i >= 0; i-- { + if length[i][n-1] <= lim || i == n-1 { + cost[i] = 0 + nbrk[i] = n + } else { + for j := i + 1; j < n; j++ { + d := lim - length[i][j-1] + c := d*d + cost[j] + if length[i][j-1] > lim { + c += pen // too-long lines get a worse penalty + } + if c < cost[i] { + cost[i] = c + nbrk[i] = j + } + } + } + } + + var lines [][][]byte + i := 0 + for i < n { + lines = append(lines, words[i:nbrk[i]]) + i = nbrk[i] + } + return lines +} diff --git a/scripts/token-log-collector/vendor/github.com/peterhellberg/link/.travis.yml b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/.travis.yml new file mode 100644 index 000000000..350f9d423 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/.travis.yml @@ -0,0 +1,7 @@ +language: go + +dist: bionic + +go: + - "1.13.4" + - "1.12.13" diff --git a/scripts/token-log-collector/vendor/github.com/peterhellberg/link/LICENSE b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/LICENSE new file mode 100644 index 000000000..7975bec62 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2015-2019 Peter Hellberg https://c7.se + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
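
For reference, the kr/text package vendored above (indent.go, wrap.go) provides the paragraph wrapping and indentation that kr/pretty's formatter relies on (its `printer.indent` wraps output in `text.NewIndentWriter`). `WrapWords` chooses line breaks that minimize "raggedness", the sum over lines of the squared difference between line length and the limit, with an extra penalty for unavoidably long lines. A minimal, illustrative sketch of the two exported helpers; the input string and widths are assumptions, not values used by this tool:

```go
package main

import (
	"fmt"

	"github.com/kr/text"
)

func main() {
	s := "The quick brown fox jumps over the lazy dog."

	// Wrap reflows the paragraph into lines of at most 20 bytes,
	// minimizing raggedness as described above.
	wrapped := text.Wrap(s, 20)
	fmt.Println(wrapped)

	// Indent prefixes every non-empty line of the wrapped text.
	fmt.Println(text.Indent(wrapped, "    "))
}
```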
+ diff --git a/scripts/token-log-collector/vendor/github.com/peterhellberg/link/README.md b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/README.md new file mode 100644 index 000000000..c2972328f --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/README.md @@ -0,0 +1,82 @@ +# link + +[![Build Status](https://travis-ci.org/peterhellberg/link.svg?branch=master)](https://travis-ci.org/peterhellberg/link) +[![Go Report Card](https://goreportcard.com/badge/github.com/peterhellberg/link)](https://goreportcard.com/report/github.com/peterhellberg/link) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/peterhellberg/link) +[![License MIT](https://img.shields.io/badge/license-MIT-lightgrey.svg?style=flat)](https://github.com/peterhellberg/link#license-mit) + +Parses **Link** headers used for pagination, as defined in [RFC 5988](https://tools.ietf.org/html/rfc5988). + +This package was originally based on , but **Parse** takes a `string` instead of `*http.Request` in this version. +It also has the convenience functions **ParseHeader**, **ParseRequest** and **ParseResponse**. + +## Installation + + go get -u github.com/peterhellberg/link + +## Exported functions + + - [Parse(s string) Group](https://godoc.org/github.com/peterhellberg/link#Parse) + - [ParseHeader(h http.Header) Group](https://godoc.org/github.com/peterhellberg/link#ParseHeader) + - [ParseRequest(req \*http.Request) Group](https://godoc.org/github.com/peterhellberg/link#ParseRequest) + - [ParseResponse(resp \*http.Response) Group](https://godoc.org/github.com/peterhellberg/link#ParseResponse) + +## Usage + +```go +package main + +import ( + "fmt" + "net/http" + + "github.com/peterhellberg/link" +) + +func main() { + for _, l := range link.Parse(`; rel="next"; foo="bar"`) { + fmt.Printf("URI: %q, Rel: %q, Extra: %+v\n", l.URI, l.Rel, l.Extra) + // URI: "https://example.com/?page=2", Rel: "next", Extra: map[foo:bar] + } + + if resp, err := http.Get("https://api.github.com/search/code?q=Println+user:golang"); err == nil { + for _, l := range link.ParseResponse(resp) { + fmt.Printf("URI: %q, Rel: %q, Extra: %+v\n", l.URI, l.Rel, l.Extra) + // URI: "https://api.github.com/search/code?q=Println+user%3Agolang&page=2", Rel: "next", Extra: map[] + // URI: "https://api.github.com/search/code?q=Println+user%3Agolang&page=34", Rel: "last", Extra: map[] + } + } +} +``` + +## Not supported + + - Extended notation ([RFC 5987](https://tools.ietf.org/html/rfc5987)) + +## Alternatives to this package + + - [github.com/tent/http-link-go](https://github.com/tent/http-link-go) + - [github.com/swhite24/link](https://github.com/swhite24/link) + +## License (MIT) + +Copyright (c) 2015-2019 [Peter Hellberg](https://c7.se) + +> Permission is hereby granted, free of charge, to any person obtaining +> a copy of this software and associated documentation files (the +> "Software"), to deal in the Software without restriction, including +> without limitation the rights to use, copy, modify, merge, publish, +> distribute, sublicense, and/or sell copies of the Software, and to +> permit persons to whom the Software is furnished to do so, subject to +> the following conditions: + +> The above copyright notice and this permission notice shall be +> included in all copies or substantial portions of the Software. 
+ +> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +> MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +> NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +> LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +> OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +> WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/scripts/token-log-collector/vendor/github.com/peterhellberg/link/doc.go b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/doc.go new file mode 100644 index 000000000..f3a5f172e --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/doc.go @@ -0,0 +1,40 @@ +/* + +Package link parses Link headers used for pagination, as defined in RFC 5988 + +Installation + +Just go get the package: + + go get -u github.com/peterhellberg/link + +Usage + +A small usage example + + package main + + import ( + "fmt" + "net/http" + + "github.com/peterhellberg/link" + ) + + func main() { + for _, l := range link.Parse(`; rel="next"; foo="bar"`) { + fmt.Printf("URI: %q, Rel: %q, Extra: %+v\n", l.URI, l.Rel, l.Extra) + // URI: "https://example.com/?page=2", Rel: "next", Extra: map[foo:bar] + } + + if resp, err := http.Get("https://api.github.com/search/code?q=Println+user:golang"); err == nil { + for _, l := range link.ParseResponse(resp) { + fmt.Printf("URI: %q, Rel: %q, Extra: %+v\n", l.URI, l.Rel, l.Extra) + // URI: "https://api.github.com/search/code?q=Println+user%3Agolang&page=2", Rel: "next", Extra: map[] + // URI: "https://api.github.com/search/code?q=Println+user%3Agolang&page=34", Rel: "last", Extra: map[] + } + } + } + +*/ +package link diff --git a/scripts/token-log-collector/vendor/github.com/peterhellberg/link/link.go b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/link.go new file mode 100644 index 000000000..8c2f02525 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/peterhellberg/link/link.go @@ -0,0 +1,111 @@ +package link + +import ( + "net/http" + "regexp" + "strings" +) + +var ( + commaRegexp = regexp.MustCompile(`,\s{0,}`) + valueCommaRegexp = regexp.MustCompile(`([^"]),`) + equalRegexp = regexp.MustCompile(` *= *`) + keyRegexp = regexp.MustCompile(`[a-z*]+`) + linkRegexp = regexp.MustCompile(`\A<(.+)>;(.+)\z`) + semiRegexp = regexp.MustCompile(`; +`) + valRegexp = regexp.MustCompile(`"+([^"]+)"+`) +) + +// Group returned by Parse, contains multiple links indexed by "rel" +type Group map[string]*Link + +// Link contains a Link item with URI, Rel, and other non-URI components in Extra. 
+type Link struct { + URI string + Rel string + Extra map[string]string +} + +// String returns the URI +func (l *Link) String() string { + return l.URI +} + +// ParseRequest parses the provided *http.Request into a Group +func ParseRequest(req *http.Request) Group { + if req == nil { + return nil + } + + return ParseHeader(req.Header) +} + +// ParseResponse parses the provided *http.Response into a Group +func ParseResponse(resp *http.Response) Group { + if resp == nil { + return nil + } + + return ParseHeader(resp.Header) +} + +// ParseHeader retrieves the Link header from the provided http.Header and parses it into a Group +func ParseHeader(h http.Header) Group { + if headers, found := h["Link"]; found { + return Parse(strings.Join(headers, ", ")) + } + + return nil +} + +// Parse parses the provided string into a Group +func Parse(s string) Group { + if s == "" { + return nil + } + + s = valueCommaRegexp.ReplaceAllString(s, "$1") + + group := Group{} + + for _, l := range commaRegexp.Split(s, -1) { + linkMatches := linkRegexp.FindAllStringSubmatch(l, -1) + + if len(linkMatches) == 0 { + return nil + } + + pieces := linkMatches[0] + + link := &Link{URI: pieces[1], Extra: map[string]string{}} + + for _, extra := range semiRegexp.Split(pieces[2], -1) { + vals := equalRegexp.Split(extra, -1) + + key := keyRegexp.FindString(vals[0]) + val := valRegexp.FindStringSubmatch(vals[1])[1] + + if key == "rel" { + vals := strings.Split(val, " ") + rels := []string{vals[0]} + + if len(vals) > 1 { + for _, v := range vals[1:] { + if !strings.HasPrefix(v, "http") { + rels = append(rels, v) + } + } + } + + rel := strings.Join(rels, " ") + + link.Rel = rel + group[rel] = link + } else { + link.Extra[key] = val + } + } + } + + return group +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/LICENSE b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/LICENSE new file mode 100644 index 000000000..09cb542ab --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/LICENSE @@ -0,0 +1,182 @@ +go-vcloud-director + +Copyright (c) 2018 VMware, Inc. All rights reserved + +The Apache 2.0 license (the "License") set forth below applies to +all parts of the go-vcloud-director project except as noted below. +You may not use this file except in compliance with the License. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the +copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other +entities that control, are controlled by, or are under common control +with that entity. For the purposes of this definition, "control" means +(i) the power, direct or indirect, to cause the direction or management +of such entity, whether by contract or otherwise, or (ii) ownership +of fifty percent (50%) or more of the outstanding shares, or (iii) +beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. 
+ +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation source, +and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation +or translation of a Source form, including but not limited to compiled +object code, generated documentation, and conversions to other media +types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a copyright +notice that is included in or attached to the work (an example is provided +in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, +that is based on (or derived from) the Work and for which the editorial +revisions, annotations, elaborations, or other modifications represent, +as a whole, an original work of authorship. For the purposes of this +License, Derivative Works shall not include works that remain separable +from, or merely link (or bind by name) to the interfaces of, the Work +and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the +original version of the Work and any modifications or additions to +that Work or Derivative Works thereof, that is intentionally submitted +to Licensor for inclusion in the Work by the copyright owner or by an +individual or Legal Entity authorized to submit on behalf of the copyright +owner. For the purposes of this definition, "submitted" means any form of +electronic, verbal, or written communication sent to the Licensor or its +representatives, including but not limited to communication on electronic +mailing lists, source code control systems, and issue tracking systems +that are managed by, or on behalf of, the Licensor for the purpose of +discussing and improving the Work, but excluding communication that is +conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. +Subject to the terms and conditions of this License, each Contributor +hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, +royalty-free, irrevocable copyright license to reproduce, prepare +Derivative Works of, publicly display, publicly perform, sublicense, and +distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. +Subject to the terms and conditions of this License, each Contributor +hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, +royalty- free, irrevocable (except as stated in this section) patent +license to make, have made, use, offer to sell, sell, import, and +otherwise transfer the Work, where such license applies only to those +patent claims licensable by such Contributor that are necessarily +infringed by their Contribution(s) alone or by combination of +their Contribution(s) with the Work to which such Contribution(s) +was submitted. If You institute patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the +Work or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses granted +to You under this License for that Work shall terminate as of the date +such litigation is filed. 
+ +4. Redistribution. +You may reproduce and distribute copies of the Work or Derivative Works +thereof in any medium, with or without modifications, and in Source or +Object form, provided that You meet the following conditions: + + a. You must give any other recipients of the Work or Derivative Works + a copy of this License; and + + b. You must cause any modified files to carry prominent notices stating + that You changed the files; and + + c. You must retain, in the Source form of any Derivative Works that + You distribute, all copyright, patent, trademark, and attribution + notices from the Source form of the Work, excluding those notices + that do not pertain to any part of the Derivative Works; and + + d. If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one of + the following places: within a NOTICE text file distributed as part + of the Derivative Works; within the Source form or documentation, + if provided along with the Derivative Works; or, within a display + generated by the Derivative Works, if and wherever such third-party + notices normally appear. The contents of the NOTICE file are for + informational purposes only and do not modify the License. You + may add Your own attribution notices within Derivative Works that + You distribute, alongside or as an addendum to the NOTICE text + from the Work, provided that such additional attribution notices + cannot be construed as modifying the License. You may add Your own + copyright statement to Your modifications and may provide additional + or different license terms and conditions for use, reproduction, or + distribution of Your modifications, or for any such Derivative Works + as a whole, provided Your use, reproduction, and distribution of the + Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. +Unless You explicitly state otherwise, any Contribution intentionally +submitted for inclusion in the Work by You to the Licensor shall be +under the terms and conditions of this License, without any additional +terms or conditions. Notwithstanding the above, nothing herein shall +supersede or modify the terms of any separate license agreement you may +have executed with Licensor regarding such Contributions. + +6. Trademarks. +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. +Unless required by applicable law or agreed to in writing, Licensor +provides the Work (and each Contributor provides its Contributions) on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +express or implied, including, without limitation, any warranties or +conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR +A PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
+In no event and under no legal theory, whether in tort (including +negligence), contract, or otherwise, unless required by applicable law +(such as deliberate and grossly negligent acts) or agreed to in writing, +shall any Contributor be liable to You for damages, including any direct, +indirect, special, incidental, or consequential damages of any character +arising as a result of this License or out of the use or inability to +use the Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all other +commercial damages or losses), even if such Contributor has been advised +of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. +While redistributing the Work or Derivative Works thereof, You may +choose to offer, and charge a fee for, acceptance of support, warranty, +indemnity, or other liability obligations and/or rights consistent with +this License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf of +any other Contributor, and only if You agree to indemnify, defend, and +hold each Contributor harmless for any liability incurred by, or claims +asserted against, such Contributor by reason of your accepting any such +warranty or additional liability. + +END OF TERMS AND CONDITIONS + diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/NOTICE b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/NOTICE new file mode 100644 index 000000000..86b037a8f --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/NOTICE @@ -0,0 +1,12 @@ +go-vcloud-director + +Copyright (c) 2018 VMware, Inc. All Rights Reserved. + +This product is licensed to you under the Apache 2.0 license (the +"License"). You may not use this product except in compliance with +the Apache 2.0 License. + +This product may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of the subcomponent's license, +as noted in the LICENSE file. diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/access_control.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/access_control.go new file mode 100644 index 000000000..016497b42 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/access_control.go @@ -0,0 +1,356 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "bytes" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// orgInfoCache is a cache to save org information, avoid repeated calls to compute the same result. +// The keys to this map are the requesting objects IDs. 
+var orgInfoCache = make(map[string]*TenantContext) + +// GetAccessControl retrieves the access control information for the requested entity +func (client Client) GetAccessControl(href, entityType, entityName string, headerValues map[string]string) (*types.ControlAccessParams, error) { + + href += "/controlAccess" + var controlAccess types.ControlAccessParams + + acUrl, err := url.ParseRequestURI(href) + if err != nil { + return nil, fmt.Errorf("[client.GetAccessControl] error parsing HREF %s: %s", href, err) + } + var additionalHeader = make(http.Header) + + if len(headerValues) > 0 { + for k, v := range headerValues { + additionalHeader.Add(k, v) + } + } + req := client.newRequest( + nil, // params + nil, // notEncodedParams + http.MethodGet, // method + *acUrl, // reqUrl + nil, // body + client.APIVersion, // apiVersion + additionalHeader, // additionalHeader + ) + + resp, err := checkResp(client.Http.Do(req)) + if err != nil { + return nil, fmt.Errorf("[client.GetAccessControl] error checking response to request %s: %s", href, err) + } + if resp == nil { + return nil, fmt.Errorf("[client.GetAccessControl] nil response received") + } + if err = decodeBody(types.BodyTypeXML, resp, &controlAccess); err != nil { + return nil, fmt.Errorf("[client.GetAccessControl] error decoding response: %s", err) + } + + return &controlAccess, nil +} + +// SetAccessControl changes the access control information for this entity +// There are two ways of setting the access: +// with accessControl.IsSharedToEveryone = true we give access to everyone +// with accessControl.IsSharedToEveryone = false, accessControl.AccessSettings defines which subjects can access the vApp +// For each setting we must provide: +// * The subject (HREF and Type are mandatory) +// * The access level (one of ReadOnly, Change, FullControl) +func (client *Client) SetAccessControl(accessControl *types.ControlAccessParams, href, entityType, entityName string, headerValues map[string]string) error { + + href += "/action/controlAccess" + // Make sure that subjects in the setting list are used only once + if accessControl.AccessSettings != nil && len(accessControl.AccessSettings.AccessSetting) > 0 { + if accessControl.IsSharedToEveryone { + return fmt.Errorf("[client.SetAccessControl] can't set IsSharedToEveryone and AccessSettings at the same time for %s %s (%s)", entityType, entityName, href) + } + var used = make(map[string]bool) + for _, setting := range accessControl.AccessSettings.AccessSetting { + _, seen := used[setting.Subject.HREF] + if seen { + return fmt.Errorf("[client.SetAccessControl] subject %s (%s) used more than once", setting.Subject.Name, setting.Subject.HREF) + } + used[setting.Subject.HREF] = true + if setting.Subject.Type == "" { + return fmt.Errorf("[client.SetAccessControl] subject %s (%s) has no type defined", setting.Subject.Name, setting.Subject.HREF) + } + } + } + + accessControl.Xmlns = types.XMLNamespaceVCloud + queryUrl, err := url.ParseRequestURI(href) + if err != nil { + return fmt.Errorf("[client.SetAccessControl] error parsing HREF %s: %s", href, err) + } + + var header = make(http.Header) + if len(headerValues) > 0 { + for k, v := range headerValues { + header.Add(k, v) + } + } + + marshaledXml, err := xml.MarshalIndent(accessControl, " ", " ") + if err != nil { + return fmt.Errorf("[client.SetAccessControl] error marshalling xml data: %s", err) + } + body := bytes.NewBufferString(xml.Header + string(marshaledXml)) + + req := client.newRequest( + nil, // params + nil, // notEncodedParams + 
http.MethodPost, // method + *queryUrl, // reqUrl + body, // body + client.APIVersion, // apiVersion + header, // additionalHeader + ) + + resp, err := checkResp(client.Http.Do(req)) + + if err != nil { + return fmt.Errorf("[client.SetAccessControl] error checking response to HREF %s: %s", href, err) + } + if resp == nil { + return fmt.Errorf("[client.SetAccessControl] nil response received") + } + _, err = checkResp(resp, err) + return err +} + +// GetAccessControl retrieves the access control information for this vApp +func (vapp VApp) GetAccessControl(useTenantContext bool) (*types.ControlAccessParams, error) { + + if vapp.VApp.HREF == "" { + return nil, fmt.Errorf("vApp HREF is empty") + } + // if useTenantContext is false, we use an empty header (= default behavior) + // if it is true, we use a header populated with tenant context values + accessControlHeader, err := vapp.getAccessControlHeader(useTenantContext) + if err != nil { + return nil, err + } + return vapp.client.GetAccessControl(vapp.VApp.HREF, "vApp", vapp.VApp.Name, accessControlHeader) +} + +// SetAccessControl changes the access control information for this vApp +func (vapp VApp) SetAccessControl(accessControl *types.ControlAccessParams, useTenantContext bool) error { + + if vapp.VApp.HREF == "" { + return fmt.Errorf("vApp HREF is empty") + } + + // if useTenantContext is false, we use an empty header (= default behavior) + // if it is true, we use a header populated with tenant context values + accessControlHeader, err := vapp.getAccessControlHeader(useTenantContext) + if err != nil { + return err + } + return vapp.client.SetAccessControl(accessControl, vapp.VApp.HREF, "vApp", vapp.VApp.Name, accessControlHeader) + +} + +// RemoveAccessControl is a shortcut to SetAccessControl with all access disabled +func (vapp VApp) RemoveAccessControl(useTenantContext bool) error { + return vapp.SetAccessControl(&types.ControlAccessParams{IsSharedToEveryone: false}, useTenantContext) +} + +// IsShared shows whether a vApp is shared or not, regardless of the number of subjects sharing it +func (vapp VApp) IsShared(useTenantContext bool) (bool, error) { + settings, err := vapp.GetAccessControl(useTenantContext) + if err != nil { + return false, err + } + if settings.IsSharedToEveryone { + return true, nil + } + return settings.AccessSettings != nil, nil +} + +// GetAccessControl retrieves the access control information for this catalog +func (adminCatalog AdminCatalog) GetAccessControl(useTenantContext bool) (*types.ControlAccessParams, error) { + + if adminCatalog.AdminCatalog.HREF == "" { + return nil, fmt.Errorf("catalog HREF is empty") + } + href := strings.Replace(adminCatalog.AdminCatalog.HREF, "/admin/", "/", 1) + + // if useTenantContext is false, we use an empty header (= default behavior) + // if it is true, we use a header populated with tenant context values + accessControlHeader, err := adminCatalog.getAccessControlHeader(useTenantContext) + if err != nil { + return nil, err + } + return adminCatalog.client.GetAccessControl(href, "catalog", adminCatalog.AdminCatalog.Name, accessControlHeader) +} + +// SetAccessControl changes the access control information for this catalog +func (adminCatalog AdminCatalog) SetAccessControl(accessControl *types.ControlAccessParams, useTenantContext bool) error { + + if adminCatalog.AdminCatalog.HREF == "" { + return fmt.Errorf("catalog HREF is empty") + } + href := strings.Replace(adminCatalog.AdminCatalog.HREF, "/admin/", "/", 1) + + // if useTenantContext is false, we use an empty header 
(= default behavior) + // if it is true, we use a header populated with tenant context values + accessControlHeader, err := adminCatalog.getAccessControlHeader(useTenantContext) + if err != nil { + return err + } + return adminCatalog.client.SetAccessControl(accessControl, href, "catalog", adminCatalog.AdminCatalog.Name, accessControlHeader) +} + +// RemoveAccessControl is a shortcut to SetAccessControl with all access disabled +func (adminCatalog AdminCatalog) RemoveAccessControl(useTenantContext bool) error { + return adminCatalog.SetAccessControl(&types.ControlAccessParams{IsSharedToEveryone: false}, useTenantContext) +} + +// IsShared shows whether a catalog is shared or not, regardless of the number of subjects sharing it +func (adminCatalog AdminCatalog) IsShared(useTenantContext bool) (bool, error) { + settings, err := adminCatalog.GetAccessControl(useTenantContext) + if err != nil { + return false, err + } + if settings.IsSharedToEveryone { + return true, nil + } + return settings.AccessSettings != nil, nil +} + +// GetVappAccessControl is a convenience method to retrieve access control for a vApp +// from a VDC. +// The input variable vappIdentifier can be either the vApp name or its ID +func (vdc *Vdc) GetVappAccessControl(vappIdentifier string, useTenantContext bool) (*types.ControlAccessParams, error) { + vapp, err := vdc.GetVAppByNameOrId(vappIdentifier, true) + if err != nil { + return nil, fmt.Errorf("error retrieving vApp %s: %s", vappIdentifier, err) + } + return vapp.GetAccessControl(useTenantContext) +} + +// GetCatalogAccessControl is a convenience method to retrieve access control for a catalog +// from an organization. +// The input variable catalogIdentifier can be either the catalog name or its ID +func (org *AdminOrg) GetCatalogAccessControl(catalogIdentifier string, useTenantContext bool) (*types.ControlAccessParams, error) { + catalog, err := org.GetAdminCatalogByNameOrId(catalogIdentifier, true) + if err != nil { + return nil, fmt.Errorf("error retrieving catalog %s: %s", catalogIdentifier, err) + } + return catalog.GetAccessControl(useTenantContext) +} + +// GetCatalogAccessControl is a convenience method to retrieve access control for a catalog +// from an organization. 
+// The input variable catalogIdentifier can be either the catalog name or its ID +func (org *Org) GetCatalogAccessControl(catalogIdentifier string, useTenantContext bool) (*types.ControlAccessParams, error) { + catalog, err := org.GetCatalogByNameOrId(catalogIdentifier, true) + if err != nil { + return nil, fmt.Errorf("error retrieving catalog %s: %s", catalogIdentifier, err) + } + return catalog.GetAccessControl(useTenantContext) +} + +// GetAccessControl retrieves the access control information for this catalog +func (catalog Catalog) GetAccessControl(useTenantContext bool) (*types.ControlAccessParams, error) { + + if catalog.Catalog.HREF == "" { + return nil, fmt.Errorf("catalog HREF is empty") + } + href := strings.Replace(catalog.Catalog.HREF, "/admin/", "/", 1) + accessControlHeader, err := catalog.getAccessControlHeader(useTenantContext) + if err != nil { + return nil, err + } + return catalog.client.GetAccessControl(href, "catalog", catalog.Catalog.Name, accessControlHeader) +} + +// SetAccessControl changes the access control information for this catalog +func (catalog Catalog) SetAccessControl(accessControl *types.ControlAccessParams, useTenantContext bool) error { + + if catalog.Catalog.HREF == "" { + return fmt.Errorf("catalog HREF is empty") + } + + href := strings.Replace(catalog.Catalog.HREF, "/admin/", "/", 1) + + // if useTenantContext is false, we use an empty header (= default behavior) + // if it is true, we use a header populated with tenant context values + accessControlHeader, err := catalog.getAccessControlHeader(useTenantContext) + if err != nil { + return err + } + return catalog.client.SetAccessControl(accessControl, href, "catalog", catalog.Catalog.Name, accessControlHeader) +} + +// RemoveAccessControl is a shortcut to SetAccessControl with all access disabled +func (catalog Catalog) RemoveAccessControl(useTenantContext bool) error { + return catalog.SetAccessControl(&types.ControlAccessParams{IsSharedToEveryone: false}, useTenantContext) +} + +// IsShared shows whether a catalog is shared or not, regardless of the number of subjects sharing it +func (catalog Catalog) IsShared(useTenantContext bool) (bool, error) { + settings, err := catalog.GetAccessControl(useTenantContext) + if err != nil { + return false, err + } + if settings.IsSharedToEveryone { + return true, nil + } + return settings.AccessSettings != nil, nil +} + +// getAccessControlHeader builds the data needed to set the header when tenant context is required. +// If useTenantContext is false, it returns an empty map. +// Otherwise, it finds the Org ID and name (going up in the hierarchy through the VDC) +// and creates the header data +func (vapp *VApp) getAccessControlHeader(useTenantContext bool) (map[string]string, error) { + if !useTenantContext { + return map[string]string{}, nil + } + orgInfo, err := vapp.getOrgInfo() + if err != nil { + return nil, err + } + return map[string]string{types.HeaderTenantContext: orgInfo.OrgId, types.HeaderAuthContext: orgInfo.OrgName}, nil +} + +// getAccessControlHeader builds the data needed to set the header when tenant context is required. +// If useTenantContext is false, it returns an empty map. 
+// Otherwise, it finds the Org ID and name and creates the header data +func (catalog *Catalog) getAccessControlHeader(useTenantContext bool) (map[string]string, error) { + if !useTenantContext { + return map[string]string{}, nil + } + orgInfo, err := catalog.getOrgInfo() + if err != nil { + return nil, err + } + return map[string]string{types.HeaderTenantContext: orgInfo.OrgId, types.HeaderAuthContext: orgInfo.OrgName}, nil +} + +// getAccessControlHeader builds the data needed to set the header when tenant context is required. +// If useTenantContext is false, it returns an empty map. +// Otherwise, it finds the Org ID and name and creates the header data +func (adminCatalog *AdminCatalog) getAccessControlHeader(useTenantContext bool) (map[string]string, error) { + if !useTenantContext { + return map[string]string{}, nil + } + orgInfo, err := adminCatalog.getOrgInfo() + + if err != nil { + return nil, err + } + return map[string]string{types.HeaderTenantContext: orgInfo.OrgId, types.HeaderAuthContext: orgInfo.OrgName}, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/admincatalog.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/admincatalog.go new file mode 100644 index 000000000..da3f9614a --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/admincatalog.go @@ -0,0 +1,94 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// AdminCatalog is a admin view of a vCloud Director Catalog +// To be able to get an AdminCatalog representation, users must have +// admin credentials to the System org. AdminCatalog is used +// for creating, updating, and deleting a Catalog. +// Definition: https://code.vmware.com/apis/220/vcloud#/doc/doc/types/AdminCatalogType.html +type AdminCatalog struct { + AdminCatalog *types.AdminCatalog + client *Client + parent organization +} + +func NewAdminCatalog(client *Client) *AdminCatalog { + return &AdminCatalog{ + AdminCatalog: new(types.AdminCatalog), + client: client, + } +} + +// Delete deletes the Catalog, returning an error if the vCD call fails. +// Link to API call: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/DELETE-Catalog.html +func (adminCatalog *AdminCatalog) Delete(force, recursive bool) error { + catalog := NewCatalog(adminCatalog.client) + catalog.Catalog = &adminCatalog.AdminCatalog.Catalog + return catalog.Delete(force, recursive) +} + +// Update updates the Catalog definition from current Catalog struct contents. +// Any differences that may be legally applied will be updated. +// Returns an error if the call to vCD fails. 
Update automatically performs +// a refresh with the admin catalog it gets back from the rest api +// Link to API call: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/PUT-Catalog.html +func (adminCatalog *AdminCatalog) Update() error { + reqCatalog := &types.Catalog{ + Name: adminCatalog.AdminCatalog.Catalog.Name, + Description: adminCatalog.AdminCatalog.Description, + } + vcomp := &types.AdminCatalog{ + Xmlns: types.XMLNamespaceVCloud, + Catalog: *reqCatalog, + CatalogStorageProfiles: adminCatalog.AdminCatalog.CatalogStorageProfiles, + IsPublished: adminCatalog.AdminCatalog.IsPublished, + } + catalog := &types.AdminCatalog{} + _, err := adminCatalog.client.ExecuteRequest(adminCatalog.AdminCatalog.HREF, http.MethodPut, + "application/vnd.vmware.admin.catalog+xml", "error updating catalog: %s", vcomp, catalog) + adminCatalog.AdminCatalog = catalog + return err +} + +// UploadOvf uploads an ova file to a catalog. This method only uploads bits to vCD spool area. +// Returns errors if any occur during upload from vCD or upload process. On upload fail client may need to +// remove vCD catalog item which waits for files to be uploaded. Files from ova are extracted to system +// temp folder "govcd+random number" and left for inspection on error. +func (adminCatalog *AdminCatalog) UploadOvf(ovaFileName, itemName, description string, uploadPieceSize int64) (UploadTask, error) { + catalog := NewCatalog(adminCatalog.client) + catalog.Catalog = &adminCatalog.AdminCatalog.Catalog + catalog.parent = adminCatalog.parent + return catalog.UploadOvf(ovaFileName, itemName, description, uploadPieceSize) +} + +func (adminCatalog *AdminCatalog) Refresh() error { + if *adminCatalog == (AdminCatalog{}) || adminCatalog.AdminCatalog.HREF == "" { + return fmt.Errorf("cannot refresh, Object is empty or HREF is empty") + } + + refreshedCatalog := &types.AdminCatalog{} + + _, err := adminCatalog.client.ExecuteRequest(adminCatalog.AdminCatalog.HREF, http.MethodGet, + "", "error refreshing VDC: %s", nil, refreshedCatalog) + if err != nil { + return err + } + adminCatalog.AdminCatalog = refreshedCatalog + + return nil +} + +// getOrgInfo finds the organization to which the admin catalog belongs, and returns its name and ID +func (adminCatalog *AdminCatalog) getOrgInfo() (*TenantContext, error) { + return adminCatalog.getTenantContext() +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/adminorg.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/adminorg.go new file mode 100644 index 000000000..793ad12eb --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/adminorg.go @@ -0,0 +1,777 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + + "github.com/vmware/go-vcloud-director/v2/util" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// AdminOrg gives an admin representation of an org. +// Administrators can delete and update orgs with an admin org object. +// AdminOrg includes all members of the Org element, and adds several +// elements that can be viewed and modified only by system administrators. 
+// Definition: https://code.vmware.com/apis/220/vcloud#/doc/doc/types/AdminOrgType.html +type AdminOrg struct { + AdminOrg *types.AdminOrg + client *Client + TenantContext *TenantContext +} + +func NewAdminOrg(cli *Client) *AdminOrg { + return &AdminOrg{ + AdminOrg: new(types.AdminOrg), + client: cli, + } +} + +// CreateCatalog creates a catalog with given name and description under the +// the given organization. Returns an AdminCatalog that contains a creation +// task. +// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/POST-CreateCatalog.html +func (adminOrg *AdminOrg) CreateCatalog(name, description string) (AdminCatalog, error) { + catalog, err := adminOrg.CreateCatalogWithStorageProfile(name, description, nil) + if err != nil { + return AdminCatalog{}, err + } + catalog.parent = adminOrg + return *catalog, nil +} + +// CreateCatalogWithStorageProfile is like CreateCatalog, but allows to specify storage profile +func (adminOrg *AdminOrg) CreateCatalogWithStorageProfile(name, description string, storageProfiles *types.CatalogStorageProfiles) (*AdminCatalog, error) { + return CreateCatalogWithStorageProfile(adminOrg.client, adminOrg.AdminOrg.Link, name, description, storageProfiles) +} + +// GetAllVDCs returns all depending VDCs for a particular Org +func (adminOrg *AdminOrg) GetAllVDCs(refresh bool) ([]*Vdc, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + + allVdcs := make([]*Vdc, len(adminOrg.AdminOrg.Vdcs.Vdcs)) + for vdcIndex, vdc := range adminOrg.AdminOrg.Vdcs.Vdcs { + vdc, err := adminOrg.GetVDCByHref(vdc.HREF) + if err != nil { + return nil, fmt.Errorf("error retrieving VDC '%s': %s", vdc.Vdc.Name, err) + } + allVdcs[vdcIndex] = vdc + + } + + return allVdcs, nil +} + +// GetAllStorageProfileReferences traverses all depending VDCs and returns a slice of storage profile references +// available in those VDCs +func (adminOrg *AdminOrg) GetAllStorageProfileReferences(refresh bool) ([]*types.Reference, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + + allVdcs, err := adminOrg.GetAllVDCs(refresh) + if err != nil { + return nil, fmt.Errorf("could not retrieve storage profile references: %s", err) + } + + allStorageProfileReferences := make([]*types.Reference, 0) + for _, vdc := range allVdcs { + if len(vdc.Vdc.VdcStorageProfiles.VdcStorageProfile) > 0 { + allStorageProfileReferences = append(allStorageProfileReferences, vdc.Vdc.VdcStorageProfiles.VdcStorageProfile...) + } + } + + return allStorageProfileReferences, nil +} + +// GetStorageProfileReferenceById finds storage profile reference by specified ID in Org or returns ErrorEntityNotFound +func (adminOrg *AdminOrg) GetStorageProfileReferenceById(id string, refresh bool) (*types.Reference, error) { + allStorageProfiles, err := adminOrg.GetAllStorageProfileReferences(refresh) + if err != nil { + return nil, fmt.Errorf("error getting all storage profiles: %s", err) + } + + for _, storageProfileReference := range allStorageProfiles { + if storageProfileReference.ID == id { + return storageProfileReference, nil + } + } + + return nil, fmt.Errorf("%s: storage profile with ID '%s' not found in Org '%s'", + ErrorEntityNotFound, id, adminOrg.AdminOrg.Name) +} + +// Deletes the org, returning an error if the vCD call fails. 
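+// An illustrative call (the adminOrg value is an assumption here, obtained
+// beforehand through one of the admin org lookups; it is not defined in this file):
+//
+//	// with force and recursive set, contained vApps, catalogs, networks and VDCs are removed first
+//	if err := adminOrg.Delete(true, true); err != nil {
+//	    // handle the deletion error
+//	}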
+// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/DELETE-Organization.html +func (adminOrg *AdminOrg) Delete(force bool, recursive bool) error { + if force && recursive { + //undeploys vapps + err := adminOrg.undeployAllVApps() + if err != nil { + return fmt.Errorf("error could not undeploy: %s", err) + } + //removes vapps + err = adminOrg.removeAllVApps() + if err != nil { + return fmt.Errorf("error could not remove vapp: %s", err) + } + //removes catalogs + err = adminOrg.removeCatalogs() + if err != nil { + return fmt.Errorf("error could not remove all catalogs: %s", err) + } + //removes networks + err = adminOrg.removeAllOrgNetworks() + if err != nil { + return fmt.Errorf("error could not remove all networks: %s", err) + } + //removes org vdcs + err = adminOrg.removeAllOrgVDCs() + if err != nil { + return fmt.Errorf("error could not remove all vdcs: %s", err) + } + } + // Disable org + err := adminOrg.Disable() + if err != nil { + return fmt.Errorf("error disabling Org %s: %s", adminOrg.AdminOrg.Name, err) + } + // Get admin HREF + orgHREF, err := url.ParseRequestURI(adminOrg.AdminOrg.HREF) + if err != nil { + return fmt.Errorf("error getting AdminOrg HREF %s : %s", adminOrg.AdminOrg.HREF, err) + } + req := adminOrg.client.NewRequest(map[string]string{ + "force": strconv.FormatBool(force), + "recursive": strconv.FormatBool(recursive), + }, http.MethodDelete, *orgHREF, nil) + resp, err := checkResp(adminOrg.client.Http.Do(req)) + if err != nil { + return fmt.Errorf("error deleting Org %s: %s", adminOrg.AdminOrg.ID, err) + } + + task := NewTask(adminOrg.client) + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return fmt.Errorf("error decoding task response: %s", err) + } + return task.WaitTaskCompletion() +} + +// Disables the org. Returns an error if the call to vCD fails. +// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/POST-DisableOrg.html +func (adminOrg *AdminOrg) Disable() error { + orgHREF, err := url.ParseRequestURI(adminOrg.AdminOrg.HREF) + if err != nil { + return fmt.Errorf("error getting AdminOrg HREF %s : %s", adminOrg.AdminOrg.HREF, err) + } + orgHREF.Path += "/action/disable" + + return adminOrg.client.ExecuteRequestWithoutResponse(orgHREF.String(), http.MethodPost, "", "error disabling organization: %s", nil) +} + +// Updates the Org definition from current org struct contents. +// Any differences that may be legally applied will be updated. +// Returns an error if the call to vCD fails. 
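+// A rough usage sketch (adminOrg is assumed to be an already retrieved *AdminOrg;
+// the new name is illustrative):
+//
+//	adminOrg.AdminOrg.FullName = "renamed organization"
+//	task, err := adminOrg.Update()
+//	if err == nil {
+//	    err = task.WaitTaskCompletion()
+//	}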
+// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/PUT-Organization.html +func (adminOrg *AdminOrg) Update() (Task, error) { + vcomp := &types.AdminOrg{ + Xmlns: types.XMLNamespaceVCloud, + Name: adminOrg.AdminOrg.Name, + IsEnabled: adminOrg.AdminOrg.IsEnabled, + FullName: adminOrg.AdminOrg.FullName, + Description: adminOrg.AdminOrg.Description, + OrgSettings: adminOrg.AdminOrg.OrgSettings, + } + + // Same workaround used in Org creation, where OrgGeneralSettings properties + // are not set unless UseServerBootSequence is also set + if vcomp.OrgSettings.OrgGeneralSettings != nil { + vcomp.OrgSettings.OrgGeneralSettings.UseServerBootSequence = true + } + + // Return the task + return adminOrg.client.ExecuteTaskRequest(adminOrg.AdminOrg.HREF, http.MethodPut, + "application/vnd.vmware.admin.organization+xml", "error updating Org: %s", vcomp) +} + +// Undeploys every vapp within an organization +func (adminOrg *AdminOrg) undeployAllVApps() error { + for _, vdcs := range adminOrg.AdminOrg.Vdcs.Vdcs { + adminVdcHREF, err := url.Parse(vdcs.HREF) + if err != nil { + return err + } + vdc, err := adminOrg.getVdcByAdminHREF(adminVdcHREF) + if err != nil { + return fmt.Errorf("error retrieving vapp with url: %s and with error %s", adminVdcHREF.Path, err) + } + err = vdc.undeployAllVdcVApps() + if err != nil { + return fmt.Errorf("error deleting vapp: %s", err) + } + } + return nil +} + +// Deletes every vapp within an organization +func (adminOrg *AdminOrg) removeAllVApps() error { + for _, vdcs := range adminOrg.AdminOrg.Vdcs.Vdcs { + adminVdcHREF, err := url.Parse(vdcs.HREF) + if err != nil { + return err + } + vdc, err := adminOrg.getVdcByAdminHREF(adminVdcHREF) + if err != nil { + return fmt.Errorf("error retrieving vapp with url: %s and with error %s", adminVdcHREF.Path, err) + } + err = vdc.removeAllVdcVApps() + if err != nil { + return fmt.Errorf("error deleting vapp: %s", err) + } + } + return nil +} + +// Given an adminorg with a valid HREF, the function refetches the adminorg +// and updates the user's adminorg data. Otherwise if the function fails, +// it returns an error. Users should use refresh whenever they have +// a stale org due to the creation/update/deletion of a resource +// within the org or the org itself. +func (adminOrg *AdminOrg) Refresh() error { + if *adminOrg == (AdminOrg{}) { + return fmt.Errorf("cannot refresh, Object is empty") + } + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. 
+ unmarshalledAdminOrg := &types.AdminOrg{} + + _, err := adminOrg.client.ExecuteRequest(adminOrg.AdminOrg.HREF, http.MethodGet, + "", "error refreshing organization: %s", nil, unmarshalledAdminOrg) + if err != nil { + return err + } + adminOrg.AdminOrg = unmarshalledAdminOrg + + return nil +} + +// Gets a vdc within org associated with an admin vdc url +func (adminOrg *AdminOrg) getVdcByAdminHREF(adminVdcUrl *url.URL) (*Vdc, error) { + // get non admin vdc path + vdcURL := adminOrg.client.VCDHREF + vdcURL.Path += strings.Split(adminVdcUrl.Path, "/api/admin")[1] //gets id + + vdc := NewVdc(adminOrg.client) + + vdc.parent = adminOrg + + _, err := adminOrg.client.ExecuteRequest(vdcURL.String(), http.MethodGet, + "", "error retrieving vdc: %s", nil, vdc.Vdc) + + return vdc, err +} + +// Removes all vdcs in a org +func (adminOrg *AdminOrg) removeAllOrgVDCs() error { + for _, vdcs := range adminOrg.AdminOrg.Vdcs.Vdcs { + + adminVdcUrl := adminOrg.client.VCDHREF + splitVdcId := strings.Split(vdcs.HREF, "/api/vdc/") + if len(splitVdcId) == 1 { + adminVdcUrl.Path += "/admin/vdc/" + strings.Split(vdcs.HREF, "/api/admin/vdc/")[1] + "/action/disable" + } else { + adminVdcUrl.Path += "/admin/vdc/" + splitVdcId[1] + "/action/disable" + } + + req := adminOrg.client.NewRequest(map[string]string{}, http.MethodPost, adminVdcUrl, nil) + _, err := checkResp(adminOrg.client.Http.Do(req)) + if err != nil { + return fmt.Errorf("error disabling vdc: %s", err) + } + // Get admin vdc HREF for normal deletion + adminVdcUrl.Path = strings.Split(adminVdcUrl.Path, "/action/disable")[0] + req = adminOrg.client.NewRequest(map[string]string{ + "recursive": "true", + "force": "true", + }, http.MethodDelete, adminVdcUrl, nil) + resp, err := checkResp(adminOrg.client.Http.Do(req)) + if err != nil { + return fmt.Errorf("error deleting vdc: %s", err) + } + task := NewTask(adminOrg.client) + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return fmt.Errorf("error decoding task response: %s", err) + } + if task.Task.Status == "error" { + return fmt.Errorf("vdc not properly destroyed") + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("couldn't finish removing vdc %s", err) + } + + } + + return nil +} + +// Removes All networks in the org +func (adminOrg *AdminOrg) removeAllOrgNetworks() error { + for _, networks := range adminOrg.AdminOrg.Networks.Networks { + // Get Network HREF + networkHREF := adminOrg.client.VCDHREF + networkHREF.Path += "/admin/network/" + strings.Split(networks.HREF, "/api/admin/network/")[1] //gets id + + task, err := adminOrg.client.ExecuteTaskRequest(networkHREF.String(), http.MethodDelete, + "", "error deleting network: %s", nil) + if err != nil { + return err + } + + if task.Task.Status == "error" { + return fmt.Errorf("network not properly destroyed") + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("couldn't finish removing network %s", err) + } + } + return nil +} + +// removeCatalogs force removal of all organization catalogs +func (adminOrg *AdminOrg) removeCatalogs() error { + for _, catalog := range adminOrg.AdminOrg.Catalogs.Catalog { + isCatalogFromSameOrg, err := isCatalogFromSameOrg(adminOrg, catalog.Name) + if err != nil { + return fmt.Errorf("error deleting catalog: %s", err) + } + if isCatalogFromSameOrg { + // Get Catalog HREF + catalogHREF := adminOrg.client.VCDHREF + catalogHREF.Path += "/admin/catalog/" + strings.Split(catalog.HREF, "/api/admin/catalog/")[1] //gets id + req := 
adminOrg.client.NewRequest(map[string]string{ + "force": "true", + "recursive": "true", + }, http.MethodDelete, catalogHREF, nil) + _, err := checkResp(adminOrg.client.Http.Do(req)) + if err != nil { + return fmt.Errorf("error deleting catalog: %s, %s", err, catalogHREF.Path) + } + } + } + return nil + +} + +// isCatalogFromSameOrg checks if catalog is in same Org. Shared catalogs from other Org are showed as normal one +// in some API responses. +func isCatalogFromSameOrg(adminOrg *AdminOrg, catalogName string) (bool, error) { + foundCatalogs, err := adminOrg.FindAdminCatalogRecords(catalogName) + if err != nil { + return false, err + } + + if len(foundCatalogs) == 1 { + return true, nil + } + return false, nil +} + +// FindAdminCatalogRecords uses catalog name to return AdminCatalogRecord information. +func (adminOrg *AdminOrg) FindAdminCatalogRecords(name string) ([]*types.CatalogRecord, error) { + util.Logger.Printf("[DEBUG] FindAdminCatalogRecords with name: %s and org name: %s", name, adminOrg.AdminOrg.Name) + results, err := adminOrg.client.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "adminCatalog", + "filter": fmt.Sprintf("name==%s;orgName==%s", url.QueryEscape(name), url.QueryEscape(adminOrg.AdminOrg.Name)), + "filterEncoded": "true", + }) + if err != nil { + return nil, err + } + + util.Logger.Printf("[DEBUG] FindAdminCatalogRecords returned with : %#v and error: %s", results.Results.AdminCatalogRecord, err) + return results.Results.AdminCatalogRecord, nil +} + +// Given a valid catalog name, FindAdminCatalog returns an AdminCatalog object. +// If no catalog is found, then returns an empty AdminCatalog and no error. +// Otherwise it returns an error. Function allows user to use an AdminOrg +// to also fetch a Catalog. If user does not have proper credentials to +// perform administrator tasks then function returns an error. +// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/GET-Catalog-AdminView.html +// Deprecated: Use adminOrg.GetAdminCatalog instead +func (adminOrg *AdminOrg) FindAdminCatalog(catalogName string) (AdminCatalog, error) { + for _, catalog := range adminOrg.AdminOrg.Catalogs.Catalog { + // Get Catalog HREF + if catalog.Name == catalogName { + adminCatalog := NewAdminCatalog(adminOrg.client) + _, err := adminOrg.client.ExecuteRequest(catalog.HREF, http.MethodGet, + "", "error retrieving catalog: %s", nil, adminCatalog.AdminCatalog) + // The request was successful + return *adminCatalog, err + } + } + return AdminCatalog{}, nil +} + +// Given a valid catalog name, FindCatalog returns a Catalog object. +// If no catalog is found, then returns an empty catalog and no error. +// Otherwise it returns an error. Function allows user to use an AdminOrg +// to also fetch a Catalog. 
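+// For new code, the GetCatalogByName method defined below is usually preferable,
+// since it reports a missing catalog through ErrorEntityNotFound instead of an
+// empty struct; a short caller-side sketch (the catalog name is illustrative):
+//
+//	catalog, err := adminOrg.GetCatalogByName("my-catalog", true)
+//	if govcd.IsNotFound(err) {
+//	    // the catalog does not exist
+//	}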
+// Deprecated: Use adminOrg.GetCatalogByName instead +func (adminOrg *AdminOrg) FindCatalog(catalogName string) (Catalog, error) { + for _, catalog := range adminOrg.AdminOrg.Catalogs.Catalog { + // Get Catalog HREF + if catalog.Name == catalogName { + catalogURL := adminOrg.client.VCDHREF + catalogURL.Path += "/catalog/" + strings.Split(catalog.HREF, "/api/admin/catalog/")[1] //gets id + + cat := NewCatalog(adminOrg.client) + + _, err := adminOrg.client.ExecuteRequest(catalogURL.String(), http.MethodGet, + "", "error retrieving catalog: %s", nil, cat.Catalog) + + // The request was successful + return *cat, err + } + } + return Catalog{}, nil +} + +// GetCatalogByHref finds a Catalog by HREF +// On success, returns a pointer to the Catalog structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetCatalogByHref(catalogHref string) (*Catalog, error) { + splitByAdminHREF := strings.Split(catalogHref, "/api/admin") + + // admin user and normal user will have different urls + var catalogHREF string + if len(splitByAdminHREF) == 1 { + catalogHREF = catalogHref + } else { + catalogHREF = splitByAdminHREF[0] + "/api" + splitByAdminHREF[1] + } + + cat := NewCatalog(adminOrg.client) + + _, err := adminOrg.client.ExecuteRequest(catalogHREF, http.MethodGet, + "", "error retrieving catalog: %s", nil, cat.Catalog) + + if err != nil { + return nil, err + } + cat.parent = adminOrg + // The request was successful + return cat, nil +} + +// GetCatalogByName finds a Catalog by Name +// On success, returns a pointer to the Catalog structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetCatalogByName(catalogName string, refresh bool) (*Catalog, error) { + + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + + for _, catalog := range adminOrg.AdminOrg.Catalogs.Catalog { + if catalog.Name == catalogName { + return adminOrg.GetCatalogByHref(catalog.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// Extracts an UUID from a string, regardless of surrounding text +// Returns an empty string if no UUID was found +func extractUuid(input string) string { + reGetID := regexp.MustCompile(`([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})`) + matchListId := reGetID.FindAllStringSubmatch(input, -1) + if len(matchListId) > 0 && len(matchListId[0]) > 0 { + return matchListId[0][1] + } + return "" +} + +// equalIds compares two IDs and return true if they are the same +// The comparison happens by extracting the bare UUID from both the +// wanted ID and the found one. 
+// When the found ID is empty, it used the HREF for such comparison, +// This function is useful when the reference structure in the parent lookup list +// may lack the ID (such as in Org.Links, AdminOrg.Catalogs) or has an ID +// that is only a UUID without prefixes (such as in CatalogItem list) +// +// wantedId is the input string to compare +// foundId is the ID field in the reference record (can be empty) +// foundHref is the HREF field in the reference record (should never be empty) +func equalIds(wantedId, foundId, foundHref string) bool { + + wantedUuid := extractUuid(wantedId) + foundUuid := "" + + if wantedUuid == "" { + return false + } + if foundId != "" { + // In some entities, the ID is a simple UUID without prefix + foundUuid = extractUuid(foundId) + } else { + foundUuid = extractUuid(foundHref) + } + return foundUuid == wantedUuid +} + +// GetCatalogById finds a Catalog by ID +// On success, returns a pointer to the Catalog structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetCatalogById(catalogId string, refresh bool) (*Catalog, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + for _, catalog := range adminOrg.AdminOrg.Catalogs.Catalog { + if equalIds(catalogId, catalog.ID, catalog.HREF) { + return adminOrg.GetCatalogByHref(catalog.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetCatalogByNameOrId finds a Catalog by name or ID +// On success, returns a pointer to the Catalog structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetCatalogByNameOrId(identifier string, refresh bool) (*Catalog, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return adminOrg.GetCatalogByName(name, refresh) } + getById := func(id string, refresh bool) (interface{}, error) { return adminOrg.GetCatalogById(id, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*Catalog), err +} + +// GetAdminCatalogByHref finds an AdminCatalog by HREF +// On success, returns a pointer to the Catalog structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetAdminCatalogByHref(catalogHref string) (*AdminCatalog, error) { + adminCatalog := NewAdminCatalog(adminOrg.client) + + _, err := adminOrg.client.ExecuteRequest(catalogHref, http.MethodGet, + "", "error retrieving catalog: %s", nil, adminCatalog.AdminCatalog) + + if err != nil { + return nil, err + } + + adminCatalog.parent = adminOrg + // The request was successful + return adminCatalog, nil +} + +// GetCatalogByName finds an AdminCatalog by Name +// On success, returns a pointer to the AdminCatalog structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetAdminCatalogByName(catalogName string, refresh bool) (*AdminCatalog, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + for _, catalog := range adminOrg.AdminOrg.Catalogs.Catalog { + // Get Catalog HREF + if catalog.Name == catalogName { + return adminOrg.GetAdminCatalogByHref(catalog.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetCatalogById finds an AdminCatalog by ID +// On success, returns a pointer to the AdminCatalog structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetAdminCatalogById(catalogId string, 
refresh bool) (*AdminCatalog, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + for _, catalog := range adminOrg.AdminOrg.Catalogs.Catalog { + // Get Catalog HREF + if equalIds(catalogId, catalog.ID, catalog.HREF) { + return adminOrg.GetAdminCatalogByHref(catalog.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetAdminCatalogByNameOrId finds an AdminCatalog by name or ID +// On success, returns a pointer to the AdminCatalog structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetAdminCatalogByNameOrId(identifier string, refresh bool) (*AdminCatalog, error) { + getByName := func(name string, refresh bool) (interface{}, error) { + return adminOrg.GetAdminCatalogByName(name, refresh) + } + getById := func(id string, refresh bool) (interface{}, error) { + return adminOrg.GetAdminCatalogById(id, refresh) + } + entity, err := getEntityByNameOrId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*AdminCatalog), err +} + +// GetVDCByHref retrieves a VDC using a direct call with the HREF +func (adminOrg *AdminOrg) GetVDCByHref(vdcHref string) (*Vdc, error) { + splitByAdminHREF := strings.Split(vdcHref, "/api/admin") + + // admin user and normal user will have different urls + var vdcHREF string + if len(splitByAdminHREF) == 1 { + vdcHREF = vdcHref + } else { + vdcHREF = splitByAdminHREF[0] + "/api" + splitByAdminHREF[1] + } + + vdc := NewVdc(adminOrg.client) + + _, err := adminOrg.client.ExecuteRequest(vdcHREF, http.MethodGet, + "", "error getting vdc: %s", nil, vdc.Vdc) + + if err != nil { + return nil, err + } + vdc.parent = adminOrg + + return vdc, nil +} + +// GetVDCByName finds a VDC by Name +// On success, returns a pointer to the Vdc structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetVDCByName(vdcName string, refresh bool) (*Vdc, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + for _, vdc := range adminOrg.AdminOrg.Vdcs.Vdcs { + if vdc.Name == vdcName { + return adminOrg.GetVDCByHref(vdc.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetVDCById finds a VDC by ID +// On success, returns a pointer to the Vdc structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetVDCById(vdcId string, refresh bool) (*Vdc, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + for _, vdc := range adminOrg.AdminOrg.Vdcs.Vdcs { + if equalIds(vdcId, vdc.ID, vdc.HREF) { + return adminOrg.GetVDCByHref(vdc.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetVDCByNameOrId finds a VDC by name or ID +// On success, returns a pointer to the VDC structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetVDCByNameOrId(identifier string, refresh bool) (*Vdc, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return adminOrg.GetVDCByName(name, refresh) } + getById := func(id string, refresh bool) (interface{}, error) { return adminOrg.GetVDCById(id, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*Vdc), err +} + +// If user specifies valid vdc name then this returns a vdc object. +// If no vdc is found, then it returns an empty vdc and no error. 
+// Otherwise it returns an empty vdc and an error. This function +// allows users to use an AdminOrg to fetch a vdc as well. +// Deprecated: Use adminOrg.GetVDCByName instead +func (adminOrg *AdminOrg) GetVdcByName(vdcname string) (Vdc, error) { + for _, vdcs := range adminOrg.AdminOrg.Vdcs.Vdcs { + if vdcs.Name == vdcname { + splitByAdminHREF := strings.Split(vdcs.HREF, "/api/admin") + + // admin user and normal user will have different urls + var vdcHREF string + if len(splitByAdminHREF) == 1 { + vdcHREF = vdcs.HREF + } else { + vdcHREF = splitByAdminHREF[0] + "/api" + splitByAdminHREF[1] + } + + vdc := NewVdc(adminOrg.client) + + _, err := adminOrg.client.ExecuteRequest(vdcHREF, http.MethodGet, + "", "error getting vdc: %s", nil, vdc.Vdc) + + return *vdc, err + } + } + return Vdc{}, nil +} + +// QueryCatalogList returns a list of catalogs for this organization +func (adminOrg *AdminOrg) QueryCatalogList() ([]*types.CatalogRecord, error) { + util.Logger.Printf("[DEBUG] QueryCatalogList with org name %s", adminOrg.AdminOrg.Name) + queryType := types.QtCatalog + if adminOrg.client.IsSysAdmin { + queryType = types.QtAdminCatalog + } + results, err := adminOrg.client.cumulativeQuery(queryType, nil, map[string]string{ + "type": queryType, + "filter": fmt.Sprintf("orgName==%s", url.QueryEscape(adminOrg.AdminOrg.Name)), + "filterEncoded": "true", + }) + if err != nil { + return nil, err + } + + var catalogs []*types.CatalogRecord + + if adminOrg.client.IsSysAdmin { + catalogs = results.Results.AdminCatalogRecord + } else { + catalogs = results.Results.CatalogRecord + } + util.Logger.Printf("[DEBUG] QueryCatalogList returned with : %#v and error: %s", catalogs, err) + return catalogs, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/adminorg_administration.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/adminorg_administration.go new file mode 100644 index 000000000..e6413f07e --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/adminorg_administration.go @@ -0,0 +1,61 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// LdapConfigure allows to configure LDAP mode in use by the Org +func (adminOrg *AdminOrg) LdapConfigure(settings *types.OrgLdapSettingsType) (*types.OrgLdapSettingsType, error) { + util.Logger.Printf("[DEBUG] Configuring LDAP mode for Org name %s", adminOrg.AdminOrg.Name) + + // Xmlns field is not mandatory when `types.OrgLdapSettingsType` is set as part of whole + // `AdminOrg` structure but it must be set when directly updating LDAP. 
For that reason + // `types.OrgLdapSettingsType` Xmlns struct tag has 'omitempty' set + settings.Xmlns = types.XMLNamespaceVCloud + + href := adminOrg.AdminOrg.HREF + "/settings/ldap" + _, err := adminOrg.client.ExecuteRequest(href, http.MethodPut, types.MimeOrgLdapSettings, + "error updating LDAP settings: %s", settings, nil) + if err != nil { + return nil, fmt.Errorf("error updating LDAP mode for Org name '%s': %s", adminOrg.AdminOrg.Name, err) + } + + ldapSettings, err := adminOrg.GetLdapConfiguration() + if err != nil { + return nil, fmt.Errorf("error retrieving LDAP configuration: %s", err) + } + + return ldapSettings, nil +} + +// LdapDisable wraps LdapConfigure to disable LDAP configuration for org +func (adminOrg *AdminOrg) LdapDisable() error { + _, err := adminOrg.LdapConfigure(&types.OrgLdapSettingsType{OrgLdapMode: types.LdapModeNone}) + return err +} + +// GetLdapConfiguration retrieves LDAP configuration structure +func (adminOrg *AdminOrg) GetLdapConfiguration() (*types.OrgLdapSettingsType, error) { + util.Logger.Printf("[DEBUG] Reading LDAP configuration for Org name %s", adminOrg.AdminOrg.Name) + + ldapSettings := &types.OrgLdapSettingsType{} + + href := adminOrg.AdminOrg.HREF + "/settings/ldap" + + _, err := adminOrg.client.ExecuteRequest(href, http.MethodGet, types.MimeOrgLdapSettings, + "error getting LDAP settings: %s", nil, ldapSettings) + + if err != nil { + return nil, err + } + + return ldapSettings, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/adminvdc.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/adminvdc.go new file mode 100644 index 000000000..5f58785bb --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/adminvdc.go @@ -0,0 +1,594 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type AdminVdc struct { + AdminVdc *types.AdminVdc + client *Client + parent organization +} + +func NewAdminVdc(cli *Client) *AdminVdc { + return &AdminVdc{ + AdminVdc: new(types.AdminVdc), + client: cli, + } +} + +// vdcVersionedFuncs is a generic representation of VDC CRUD operations across multiple versions +type vdcVersionedFuncs struct { + SupportedVersion string + CreateVdc func(adminOrg *AdminOrg, vdcConfiguration *types.VdcConfiguration) (*Vdc, error) + CreateVdcAsync func(adminOrg *AdminOrg, vdcConfiguration *types.VdcConfiguration) (Task, error) + UpdateVdc func(adminVdc *AdminVdc) (*AdminVdc, error) + UpdateVdcAsync func(adminVdc *AdminVdc) (Task, error) +} + +// VDC function mapping for API version 32.0 (from vCD 9.7) +var vdcVersionedFuncsV97 = vdcVersionedFuncs{ + SupportedVersion: "32.0", + CreateVdc: createVdcV97, + CreateVdcAsync: createVdcAsyncV97, + UpdateVdc: updateVdcV97, + UpdateVdcAsync: updateVdcAsyncV97, +} + +// vdcVersionedFuncsByVcdVersion is a map of VDC functions by vCD version +var vdcVersionedFuncsByVcdVersion = map[string]vdcVersionedFuncs{ + "vdc9.7": vdcVersionedFuncsV97, + + // If we add a new function to this list, we also need to update the "default" entry + // The "default" entry will hold the highest currently available function + "default": vdcVersionedFuncsV97, +} + +// getVdcVersionedFuncsByVdcVersion is a wrapper function that retrieves the requested versioned VDC function +// When the wanted version does not exist in the map, it returns the highest available one. +func getVdcVersionedFuncsByVdcVersion(version string) vdcVersionedFuncs { + f, ok := vdcVersionedFuncsByVcdVersion[version] + if ok { + return f + } else { + return vdcVersionedFuncsByVcdVersion["default"] + } +} + +// GetAdminVdcByName function uses a valid VDC name and returns a admin VDC object. +// If no VDC is found, then it returns an empty VDC and no error. +// Otherwise it returns an empty VDC and an error. 
+// Deprecated: Use adminOrg.GetAdminVDCByName +func (adminOrg *AdminOrg) GetAdminVdcByName(vdcname string) (AdminVdc, error) { + for _, vdcs := range adminOrg.AdminOrg.Vdcs.Vdcs { + if vdcs.Name == vdcname { + adminVdc := NewAdminVdc(adminOrg.client) + _, err := adminOrg.client.ExecuteRequest(vdcs.HREF, http.MethodGet, + "", "error getting vdc: %s", nil, adminVdc.AdminVdc) + return *adminVdc, err + } + } + return AdminVdc{}, nil +} + +// GetAdminVDCByHref retrieves a VDC using a direct call with the HREF +func (adminOrg *AdminOrg) GetAdminVDCByHref(vdcHref string) (*AdminVdc, error) { + adminVdc := NewAdminVdc(adminOrg.client) + adminVdc.parent = adminOrg + _, err := adminOrg.client.ExecuteRequest(vdcHref, http.MethodGet, + "", "error getting vdc: %s", nil, adminVdc.AdminVdc) + + if err != nil { + return nil, err + } + return adminVdc, nil +} + +// GetAdminVDCByName finds an Admin VDC by Name +// On success, returns a pointer to the AdminVdc structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetAdminVDCByName(vdcName string, refresh bool) (*AdminVdc, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + for _, vdc := range adminOrg.AdminOrg.Vdcs.Vdcs { + if vdc.Name == vdcName { + return adminOrg.GetAdminVDCByHref(vdc.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetAdminVDCById finds an Admin VDC by ID +// On success, returns a pointer to the AdminVdc structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetAdminVDCById(vdcId string, refresh bool) (*AdminVdc, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + for _, vdc := range adminOrg.AdminOrg.Vdcs.Vdcs { + if equalIds(vdcId, vdc.ID, vdc.HREF) { + return adminOrg.GetAdminVDCByHref(vdc.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetAdminVDCByNameOrId finds an Admin VDC by Name Or ID +// On success, returns a pointer to the AdminVdc structure and a nil error +// On failure, returns a nil pointer and an error +func (adminOrg *AdminOrg) GetAdminVDCByNameOrId(identifier string, refresh bool) (*AdminVdc, error) { + getByName := func(name string, refresh bool) (interface{}, error) { + return adminOrg.GetAdminVDCByName(name, refresh) + } + getById := func(id string, refresh bool) (interface{}, error) { return adminOrg.GetAdminVDCById(id, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*AdminVdc), err +} + +// CreateVdc creates a VDC with the given params under the given organization. +// Returns an AdminVdc. 
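+// In newer code the CreateOrgVdc wrapper defined later in this file is the usual
+// entry point; a hedged sketch (the configuration literal is illustrative and incomplete):
+//
+//	vdc, err := adminOrg.CreateOrgVdc(&types.VdcConfiguration{
+//	    Name:            "my-vdc",
+//	    AllocationModel: "Flex",
+//	    // provider VDC reference, storage profiles, IsElastic, IncludeMemoryOverhead, ...
+//	})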
+// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/POST-VdcConfiguration.html +// Deprecated in favor of adminOrg.CreateOrgVdcAsync +func (adminOrg *AdminOrg) CreateVdc(vdcConfiguration *types.VdcConfiguration) (Task, error) { + err := validateVdcConfiguration(vdcConfiguration) + if err != nil { + return Task{}, err + } + + vdcConfiguration.Xmlns = types.XMLNamespaceVCloud + + vdcCreateHREF, err := url.ParseRequestURI(adminOrg.AdminOrg.HREF) + if err != nil { + return Task{}, fmt.Errorf("error parsing admin org url: %s", err) + } + vdcCreateHREF.Path += "/vdcsparams" + + adminVdc := NewAdminVdc(adminOrg.client) + + _, err = adminOrg.client.ExecuteRequest(vdcCreateHREF.String(), http.MethodPost, + "application/vnd.vmware.admin.createVdcParams+xml", "error creating VDC: %s", vdcConfiguration, adminVdc.AdminVdc) + if err != nil { + return Task{}, err + } + + // Return the task + task := NewTask(adminOrg.client) + task.Task = adminVdc.AdminVdc.Tasks.Task[0] + return *task, nil +} + +// Creates the VDC and waits for the asynchronous task to complete. +// Deprecated in favor of adminOrg.CreateOrgVdc +func (adminOrg *AdminOrg) CreateVdcWait(vdcDefinition *types.VdcConfiguration) error { + task, err := adminOrg.CreateVdc(vdcDefinition) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("couldn't finish creating VDC %s", err) + } + return nil +} + +// Given an adminVdc with a valid HREF, the function refresh the adminVdc +// and updates the adminVdc data. Returns an error on failure +// Users should use refresh whenever they suspect +// a stale VDC due to the creation/update/deletion of a resource +// within the the VDC itself. +func (adminVdc *AdminVdc) Refresh() error { + if *adminVdc == (AdminVdc{}) || adminVdc.AdminVdc.HREF == "" { + return fmt.Errorf("cannot refresh, Object is empty or HREF is empty") + } + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. + unmarshalledAdminVdc := &types.AdminVdc{} + + _, err := adminVdc.client.ExecuteRequest(adminVdc.AdminVdc.HREF, http.MethodGet, + "", "error refreshing VDC: %s", nil, unmarshalledAdminVdc) + if err != nil { + return err + } + adminVdc.AdminVdc = unmarshalledAdminVdc + + return nil +} + +// UpdateAsync updates VDC from current VDC struct contents. +// Any differences that may be legally applied will be updated. +// Returns an error if the call to vCD fails. +// API Documentation: https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/operations/PUT-Vdc.html +func (adminVdc *AdminVdc) UpdateAsync() (Task, error) { + apiVersion, err := adminVdc.client.MaxSupportedVersion() + if err != nil { + return Task{}, err + } + vdcFunctions := getVdcVersionedFuncsByVdcVersion("vdc" + apiVersionToVcdVersion[apiVersion]) + if vdcFunctions.UpdateVdcAsync == nil { + return Task{}, fmt.Errorf("function UpdateVdcAsync is not defined for %s", "vdc"+apiVersion) + } + util.Logger.Printf("[DEBUG] UpdateAsync call function for version %s", vdcFunctions.SupportedVersion) + + return vdcFunctions.UpdateVdcAsync(adminVdc) + +} + +// Update function updates an Admin VDC from current VDC struct contents. +// Any differences that may be legally applied will be updated. +// Returns an empty AdminVdc struct and error if the call to vCD fails. 
+// API Documentation: https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/operations/PUT-Vdc.html +func (adminVdc *AdminVdc) Update() (AdminVdc, error) { + apiVersion, err := adminVdc.client.MaxSupportedVersion() + if err != nil { + return AdminVdc{}, err + } + + vdcFunctions := getVdcVersionedFuncsByVdcVersion("vdc" + apiVersionToVcdVersion[apiVersion]) + if vdcFunctions.UpdateVdc == nil { + return AdminVdc{}, fmt.Errorf("function UpdateVdc is not defined for %s", "vdc"+apiVersion) + } + + util.Logger.Printf("[DEBUG] Update call function for version %s", vdcFunctions.SupportedVersion) + + updatedAdminVdc, err := vdcFunctions.UpdateVdc(adminVdc) + if err != nil { + return AdminVdc{}, err + } + return *updatedAdminVdc, err +} + +// CreateOrgVdc creates a VDC with the given params under the given organization +// and waits for the asynchronous task to complete. +// Returns an AdminVdc pointer and an error. +func (adminOrg *AdminOrg) CreateOrgVdc(vdcConfiguration *types.VdcConfiguration) (*Vdc, error) { + apiVersion, err := adminOrg.client.MaxSupportedVersion() + if err != nil { + return nil, err + } + vdcFunctions := getVdcVersionedFuncsByVdcVersion("vdc" + apiVersionToVcdVersion[apiVersion]) + if vdcFunctions.CreateVdc == nil { + return nil, fmt.Errorf("function CreateVdc is not defined for %s", "vdc"+apiVersion) + } + + util.Logger.Printf("[DEBUG] CreateOrgVdc call function for version %s", vdcFunctions.SupportedVersion) + return vdcFunctions.CreateVdc(adminOrg, vdcConfiguration) +} + +// CreateOrgVdcAsync creates a VDC with the given params under the given organization. +// Returns a Task and an error. +func (adminOrg *AdminOrg) CreateOrgVdcAsync(vdcConfiguration *types.VdcConfiguration) (Task, error) { + apiVersion, err := adminOrg.client.MaxSupportedVersion() + if err != nil { + return Task{}, err + } + vdcFunctions := getVdcVersionedFuncsByVdcVersion("vdc" + apiVersionToVcdVersion[apiVersion]) + if vdcFunctions.CreateVdcAsync == nil { + return Task{}, fmt.Errorf("function CreateVdcAsync is not defined for %s", "vdc"+apiVersion) + } + + util.Logger.Printf("[DEBUG] CreateOrgVdcAsync call function for version %s", vdcFunctions.SupportedVersion) + + return vdcFunctions.CreateVdcAsync(adminOrg, vdcConfiguration) +} + +// updateVdcAsyncV97 updates a VDC with the given params. Supports Flex type allocation. +// Needs vCD 9.7+ to work. Returns a Task and an error. +func updateVdcAsyncV97(adminVdc *AdminVdc) (Task, error) { + util.Logger.Printf("[TRACE] updateVdcAsyncV97 called %#v", *adminVdc) + adminVdc.AdminVdc.Xmlns = types.XMLNamespaceVCloud + + // Return the task + return adminVdc.client.ExecuteTaskRequest(adminVdc.AdminVdc.HREF, http.MethodPut, + types.MimeAdminVDC, "error updating VDC: %s", adminVdc.AdminVdc) +} + +// updateVdcV97 updates a VDC with the given params +// and waits for the asynchronous task to complete. Supports Flex type allocation. +// Needs vCD 9.7+ to work. Returns an AdminVdc pointer and an error. 
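+// Callers normally reach it indirectly through AdminVdc.Update, which resolves the
+// versioned implementation via getVdcVersionedFuncsByVdcVersion, roughly:
+//
+//	vdcFunctions := getVdcVersionedFuncsByVdcVersion("vdc" + apiVersionToVcdVersion[apiVersion])
+//	updatedVdc, err := vdcFunctions.UpdateVdc(adminVdc)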
+func updateVdcV97(adminVdc *AdminVdc) (*AdminVdc, error) { + util.Logger.Printf("[TRACE] updateVdcV97 called %#v", *adminVdc) + task, err := updateVdcAsyncV97(adminVdc) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, err + } + err = adminVdc.Refresh() + if err != nil { + return nil, err + } + return adminVdc, nil +} + +// createVdcV97 creates a VDC with the given params under the given organization +// and waits for the asynchronous task to complete. Supports Flex type allocation. +// Needs vCD 9.7+ to work. Returns a Vdc pointer and error. +func createVdcV97(adminOrg *AdminOrg, vdcConfiguration *types.VdcConfiguration) (*Vdc, error) { + util.Logger.Printf("[TRACE] createVdcV97 called %#v", *vdcConfiguration) + task, err := createVdcAsyncV97(adminOrg, vdcConfiguration) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("couldn't finish creating VDC %s", err) + } + + vdc, err := adminOrg.GetVDCByName(vdcConfiguration.Name, true) + if err != nil { + return nil, err + } + return vdc, nil +} + +// createVdcAsyncV97 creates a VDC with the given params under the given organization. Supports Flex type allocation. +// Needs vCD 9.7+ to work. Returns a Task and an error +func createVdcAsyncV97(adminOrg *AdminOrg, vdcConfiguration *types.VdcConfiguration) (Task, error) { + util.Logger.Printf("[TRACE] createVdcAsyncV97 called %#v", *vdcConfiguration) + err := validateVdcConfigurationV97(*vdcConfiguration) + if err != nil { + return Task{}, err + } + + vdcConfiguration.Xmlns = types.XMLNamespaceVCloud + + vdcCreateHREF, err := url.ParseRequestURI(adminOrg.AdminOrg.HREF) + if err != nil { + return Task{}, fmt.Errorf("error parsing admin org url: %s", err) + } + vdcCreateHREF.Path += "/vdcsparams" + + adminVdc := NewAdminVdc(adminOrg.client) + + _, err = adminOrg.client.ExecuteRequest(vdcCreateHREF.String(), http.MethodPost, + "application/vnd.vmware.admin.createVdcParams+xml", "error creating VDC: %s", + vdcConfiguration, adminVdc.AdminVdc) + if err != nil { + return Task{}, err + } + + // Return the task + task := NewTask(adminOrg.client) + task.Task = adminVdc.AdminVdc.Tasks.Task[0] + return *task, nil +} + +// validateVdcConfigurationV97 uses validateVdcConfiguration and additionally checks Flex dependent values +func validateVdcConfigurationV97(vdcDefinition types.VdcConfiguration) error { + err := validateVdcConfiguration(&vdcDefinition) + if err != nil { + return err + } + if vdcDefinition.AllocationModel == "Flex" && vdcDefinition.IsElastic == nil { + return errors.New("VdcConfiguration missing required field: IsElastic") + } + if vdcDefinition.AllocationModel == "Flex" && vdcDefinition.IncludeMemoryOverhead == nil { + return errors.New("VdcConfiguration missing required field: IncludeMemoryOverhead") + } + return nil +} + +// GetVappList returns the list of vApps for an Admin VDC +func (vdc *AdminVdc) GetVappList() []*types.ResourceReference { + var list []*types.ResourceReference + for _, resourceEntities := range vdc.AdminVdc.ResourceEntities { + for _, resourceReference := range resourceEntities.ResourceEntity { + if resourceReference.Type == types.MimeVApp { + list = append(list, resourceReference) + } + } + } + return list +} + +// UpdateStorageProfile updates VDC storage profile and returns refreshed VDC or error. 
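+// A hedged sketch, mirroring the calls made by RemoveStorageProfile and
+// SetDefaultStorageProfile further down (profileRef and details are assumed to come
+// from the VDC's storage profile list and from GetStorageProfileByHref):
+//
+//	updated, err := vdc.UpdateStorageProfile(extractUuid(profileRef.HREF),
+//	    &types.AdminVdcStorageProfile{
+//	        Name:    details.Name,
+//	        Units:   details.Units,
+//	        Limit:   details.Limit,
+//	        Default: true,
+//	        Enabled: takeBoolPointer(true),
+//	        ProviderVdcStorageProfile: &types.Reference{HREF: details.ProviderVdcStorageProfile.HREF},
+//	    })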
+func (vdc *AdminVdc) UpdateStorageProfile(storageProfileId string, storageProfile *types.AdminVdcStorageProfile) (*types.AdminVdcStorageProfile, error) { + if vdc.client.VCDHREF.String() == "" { + return nil, fmt.Errorf("cannot update VDC storage profile, VCD HREF is unset") + } + + queryUrl := vdc.client.VCDHREF + queryUrl.Path += "/admin/vdcStorageProfile/" + storageProfileId + + storageProfile.Xmlns = types.XMLNamespaceVCloud + updateAdminVdcStorageProfile := &types.AdminVdcStorageProfile{} + + _, err := vdc.client.ExecuteRequest(queryUrl.String(), http.MethodPut, + types.MimeStorageProfile, "error updating VDC storage profile: %s", storageProfile, updateAdminVdcStorageProfile) + if err != nil { + return nil, fmt.Errorf("cannot update VDC storage profile, error: %s", err) + } + + return updateAdminVdcStorageProfile, err +} + +// AddStorageProfile adds a storage profile to a VDC +func (vdc *AdminVdc) AddStorageProfile(storageProfile *types.VdcStorageProfileConfiguration, description string) (Task, error) { + if vdc.client.VCDHREF.String() == "" { + return Task{}, fmt.Errorf("cannot add VDC storage profile, VCD HREF is unset") + } + + href := vdc.AdminVdc.HREF + "/vdcStorageProfiles" + + var updateStorageProfile = types.UpdateVdcStorageProfiles{ + Xmlns: types.XMLNamespaceVCloud, + Name: storageProfile.ProviderVdcStorageProfile.Name, + Description: description, + AddStorageProfile: storageProfile, + RemoveStorageProfile: nil, + } + + task, err := vdc.client.ExecuteTaskRequest(href, http.MethodPost, + types.MimeUpdateVdcStorageProfiles, "error adding VDC storage profile: %s", &updateStorageProfile) + if err != nil { + return Task{}, fmt.Errorf("cannot add VDC storage profile, error: %s", err) + } + + return task, nil +} + +// AddStorageProfileWait adds a storage profile to a VDC and return a refreshed VDC +func (vdc *AdminVdc) AddStorageProfileWait(storageProfile *types.VdcStorageProfileConfiguration, description string) error { + task, err := vdc.AddStorageProfile(storageProfile, description) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return err + } + return vdc.Refresh() +} + +// RemoveStorageProfile remove a storage profile from a VDC +func (vdc *AdminVdc) RemoveStorageProfile(storageProfileName string) (Task, error) { + if vdc.client.VCDHREF.String() == "" { + return Task{}, fmt.Errorf("cannot remove VDC storage profile: VCD HREF is unset") + } + + var storageProfile *types.Reference + for _, sp := range vdc.AdminVdc.VdcStorageProfiles.VdcStorageProfile { + if sp.Name == storageProfileName { + storageProfile = sp + } + } + if storageProfile == nil { + return Task{}, fmt.Errorf("cannot remove VDC storage profile: storage profile '%s' not found in VDC", storageProfileName) + } + + vdcStorageProfileDetails, err := vdc.client.GetStorageProfileByHref(storageProfile.HREF) + if err != nil { + return Task{}, fmt.Errorf("cannot retrieve VDC storage profile '%s' details: %s", storageProfileName, err) + } + if vdcStorageProfileDetails.Enabled { + _, err = vdc.UpdateStorageProfile(extractUuid(storageProfile.HREF), &types.AdminVdcStorageProfile{ + Name: vdcStorageProfileDetails.Name, + Units: vdcStorageProfileDetails.Units, + Limit: vdcStorageProfileDetails.Limit, + Default: false, + Enabled: takeBoolPointer(false), + ProviderVdcStorageProfile: &types.Reference{ + HREF: vdcStorageProfileDetails.ProviderVdcStorageProfile.HREF, + }, + }, + ) + if err != nil { + return Task{}, fmt.Errorf("cannot disable VDC storage profile '%s': %s", storageProfileName, 
err) + } + } + + href := vdc.AdminVdc.HREF + "/vdcStorageProfiles" + + var updateStorageProfile = types.UpdateVdcStorageProfiles{ + Xmlns: types.XMLNamespaceVCloud, + Name: vdcStorageProfileDetails.Name, + Description: "", + RemoveStorageProfile: storageProfile, + } + + task, err := vdc.client.ExecuteTaskRequest(href, http.MethodPost, + types.MimeUpdateVdcStorageProfiles, "error removing VDC storage profile: %s", &updateStorageProfile) + if err != nil { + return Task{}, fmt.Errorf("cannot remove VDC storage profile, error: %s", err) + } + + return task, nil +} + +// RemoveStorageProfileWait removes a storege profile from a VDC and returns a refreshed VDC or an error +func (vdc *AdminVdc) RemoveStorageProfileWait(storageProfileName string) error { + task, err := vdc.RemoveStorageProfile(storageProfileName) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return err + } + return vdc.Refresh() +} + +// SetDefaultStorageProfile sets a given storage profile as default +// This operation will automatically unset the previous default storage profile. +func (vdc *AdminVdc) SetDefaultStorageProfile(storageProfileName string) error { + if vdc.client.VCDHREF.String() == "" { + return fmt.Errorf("cannot set VDC default storage profile: VCD HREF is unset") + } + + var storageProfile *types.Reference + for _, sp := range vdc.AdminVdc.VdcStorageProfiles.VdcStorageProfile { + if sp.Name == storageProfileName { + storageProfile = sp + } + } + if storageProfile == nil { + return fmt.Errorf("cannot set VDC default storage profile: storage profile '%s' not found in VDC", storageProfileName) + } + + vdcStorageProfileDetails, err := vdc.client.GetStorageProfileByHref(storageProfile.HREF) + if err != nil { + return fmt.Errorf("cannot retrieve VDC storage profile '%s' details: %s", storageProfileName, err) + } + _, err = vdc.UpdateStorageProfile(extractUuid(storageProfile.HREF), &types.AdminVdcStorageProfile{ + Name: vdcStorageProfileDetails.Name, + Units: vdcStorageProfileDetails.Units, + Limit: vdcStorageProfileDetails.Limit, + Default: true, + Enabled: takeBoolPointer(true), + ProviderVdcStorageProfile: &types.Reference{ + HREF: vdcStorageProfileDetails.ProviderVdcStorageProfile.HREF, + }, + }, + ) + if err != nil { + return fmt.Errorf("cannot set VDC default storage profile '%s': %s", storageProfileName, err) + } + return vdc.Refresh() +} + +// GetDefaultStorageProfileReference finds the default storage profile for the VDC +func (adminVdc *AdminVdc) GetDefaultStorageProfileReference() (*types.Reference, error) { + var defaultSp *types.Reference + if adminVdc.AdminVdc.VdcStorageProfiles == nil || adminVdc.AdminVdc.VdcStorageProfiles.VdcStorageProfile == nil { + return nil, fmt.Errorf("no storage profiles found in VDC %s", adminVdc.AdminVdc.Name) + } + for _, sp := range adminVdc.AdminVdc.VdcStorageProfiles.VdcStorageProfile { + fullSp, err := adminVdc.client.GetStorageProfileByHref(sp.HREF) + if err != nil { + return nil, fmt.Errorf("error retrieving storage profile %s for VDC %s: %s", sp.Name, adminVdc.AdminVdc.Name, err) + } + if fullSp.Default { + if defaultSp != nil { + return nil, fmt.Errorf("more than one default storage profile found for VDC %s: '%s' and '%s'", adminVdc.AdminVdc.Name, sp.Name, defaultSp.Name) + } + defaultSp = sp + } + } + if defaultSp != nil { + return defaultSp, nil + } + return nil, fmt.Errorf("no default storage profile found for VDC %s", adminVdc.AdminVdc.Name) +} diff --git 
a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api.go new file mode 100644 index 000000000..1b848e923 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api.go @@ -0,0 +1,773 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +// Package govcd provides a simple binding for vCloud Director REST APIs. +package govcd + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// Client provides a client to vCloud Director, values can be populated automatically using the Authenticate method. +type Client struct { + APIVersion string // The API version required + VCDToken string // Access Token (authorization header) + VCDAuthHeader string // Authorization header + VCDHREF url.URL // VCD API ENDPOINT + Http http.Client // HttpClient is the client to use. Default will be used if not provided. + IsSysAdmin bool // flag if client is connected as system administrator + UsingBearerToken bool // flag if client is using a bearer token + UsingAccessToken bool // flag if client is using an API token + + // MaxRetryTimeout specifies a time limit (in seconds) for retrying requests made by the SDK + // where vCloud director may take time to respond and retry mechanism is needed. + // This must be >0 to avoid instant timeout errors. + MaxRetryTimeout int + + // UseSamlAdfs specifies if SAML auth is used for authenticating vCD instead of local login. + // The following conditions must be met so that authentication SAML authentication works: + // * SAML IdP (Identity Provider) is Active Directory Federation Service (ADFS) + // * Authentication endpoint "/adfs/services/trust/13/usernamemixed" must be enabled on ADFS + // server + UseSamlAdfs bool + // CustomAdfsRptId allows to set custom Relaying Party Trust identifier. By default vCD Entity + // ID is used as Relaying Party Trust identifier. + CustomAdfsRptId string + + // UserAgent to send for API queries. Standard format is described as: + // "User-Agent: / " + UserAgent string + + supportedVersions SupportedVersions // Versions from /api/versions endpoint + customHeader http.Header +} + +// AuthorizationHeader header key used by default to set the authorization token. +const AuthorizationHeader = "X-Vcloud-Authorization" + +// BearerTokenHeader is the header key containing a bearer token +const BearerTokenHeader = "X-Vmware-Vcloud-Access-Token" + +const ApiTokenHeader = "API-token" + +// General purpose error to be used whenever an entity is not found from a "GET" request +// Allows a simpler checking of the call result +// such as +// if err == ErrorEntityNotFound { +// // do what is needed in case of not found +// } +var errorEntityNotFoundMessage = "[ENF] entity not found" +var ErrorEntityNotFound = fmt.Errorf(errorEntityNotFoundMessage) + +// Triggers for debugging functions that show requests and responses +var debugShowRequestEnabled = os.Getenv("GOVCD_SHOW_REQ") != "" +var debugShowResponseEnabled = os.Getenv("GOVCD_SHOW_RESP") != "" + +// Enables the debugging hook to show requests as they are processed. 
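+// The same hook is armed without code changes when the GOVCD_SHOW_REQ environment
+// variable read into debugShowRequestEnabled above is set to any non-empty value.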
+//lint:ignore U1000 this function is used on request for debugging purposes +func enableDebugShowRequest() { + debugShowRequestEnabled = true +} + +// Disables the debugging hook to show requests as they are processed. +//lint:ignore U1000 this function is used on request for debugging purposes +func disableDebugShowRequest() { + debugShowRequestEnabled = false + err := os.Setenv("GOVCD_SHOW_REQ", "") + if err != nil { + util.Logger.Printf("[DEBUG - disableDebugShowRequest] error setting environment variable: %s", err) + } +} + +// Enables the debugging hook to show responses as they are processed. +//lint:ignore U1000 this function is used on request for debugging purposes +func enableDebugShowResponse() { + debugShowResponseEnabled = true +} + +// Disables the debugging hook to show responses as they are processed. +//lint:ignore U1000 this function is used on request for debugging purposes +func disableDebugShowResponse() { + debugShowResponseEnabled = false + err := os.Setenv("GOVCD_SHOW_RESP", "") + if err != nil { + util.Logger.Printf("[DEBUG - disableDebugShowResponse] error setting environment variable: %s", err) + } +} + +// On-the-fly debug hook. If either debugShowRequestEnabled or the environment +// variable "GOVCD_SHOW_REQ" are enabled, this function will show the contents +// of the request as it is being processed. +func debugShowRequest(req *http.Request, payload string) { + if debugShowRequestEnabled { + header := "[\n" + for key, value := range util.SanitizedHeader(req.Header) { + header += fmt.Sprintf("\t%s => %s\n", key, value) + } + header += "]\n" + fmt.Println("** REQUEST **") + fmt.Printf("time: %s\n", time.Now().Format("2006-01-02T15:04:05.000Z")) + fmt.Printf("method: %s\n", req.Method) + fmt.Printf("host: %s\n", req.Host) + fmt.Printf("length: %d\n", req.ContentLength) + fmt.Printf("URL: %s\n", req.URL.String()) + fmt.Printf("header: %s\n", header) + fmt.Printf("payload: %s\n", payload) + fmt.Println() + } +} + +// On-the-fly debug hook. If either debugShowResponseEnabled or the environment +// variable "GOVCD_SHOW_RESP" are enabled, this function will show the contents +// of the response as it is being processed. +func debugShowResponse(resp *http.Response, body []byte) { + if debugShowResponseEnabled { + fmt.Println("## RESPONSE ##") + fmt.Printf("time: %s\n", time.Now().Format("2006-01-02T15:04:05.000Z")) + fmt.Printf("status: %d - %s \n", resp.StatusCode, resp.Status) + fmt.Printf("length: %d\n", resp.ContentLength) + fmt.Printf("header: %v\n", util.SanitizedHeader(resp.Header)) + fmt.Printf("body: %s\n", body) + fmt.Println() + } +} + +// IsNotFound is a convenience function, similar to os.IsNotExist that checks whether a given error +// is a "Not found" error, such as +// if isNotFound(err) { +// // do what is needed in case of not found +// } +func IsNotFound(err error) bool { + return err != nil && err == ErrorEntityNotFound +} + +// ContainsNotFound is a convenience function, similar to os.IsNotExist that checks whether a given error +// contains a "Not found" error. It is almost the same as `IsNotFound` but checks if an error contains substring +// ErrorEntityNotFound +func ContainsNotFound(err error) bool { + return err != nil && strings.Contains(err.Error(), ErrorEntityNotFound.Error()) +} + +// NewRequestWitNotEncodedParams allows passing complex values params that shouldn't be encoded like for queries. e.g. 
/query?filter=name=foo +func (client *Client) NewRequestWitNotEncodedParams(params map[string]string, notEncodedParams map[string]string, method string, reqUrl url.URL, body io.Reader) *http.Request { + return client.NewRequestWitNotEncodedParamsWithApiVersion(params, notEncodedParams, method, reqUrl, body, client.APIVersion) +} + +// NewRequestWitNotEncodedParamsWithApiVersion allows passing complex values params that shouldn't be encoded like for queries. e.g. /query?filter=name=foo +// * params - request parameters +// * notEncodedParams - request parameters which will be added not encoded +// * method - request type +// * reqUrl - request url +// * body - request body +// * apiVersion - provided Api version overrides default Api version value used in request parameter +func (client *Client) NewRequestWitNotEncodedParamsWithApiVersion(params map[string]string, notEncodedParams map[string]string, method string, reqUrl url.URL, body io.Reader, apiVersion string) *http.Request { + return client.newRequest(params, notEncodedParams, method, reqUrl, body, apiVersion, nil) +} + +// newRequest is the parent of many "specific" "NewRequest" functions. +// Note. It is kept private to avoid breaking public API on every new field addition. +func (client *Client) newRequest(params map[string]string, notEncodedParams map[string]string, method string, reqUrl url.URL, body io.Reader, apiVersion string, additionalHeader http.Header) *http.Request { + reqValues := url.Values{} + + // Build up our request parameters + for key, value := range params { + reqValues.Add(key, value) + } + + // Add the params to our URL + reqUrl.RawQuery = reqValues.Encode() + + for key, value := range notEncodedParams { + if key != "" && value != "" { + reqUrl.RawQuery += "&" + key + "=" + value + } + } + + // If the body contains data - try to read all contents for logging and re-create another + // io.Reader with all contents to use it down the line + var readBody []byte + var err error + if body != nil { + readBody, err = ioutil.ReadAll(body) + if err != nil { + util.Logger.Printf("[DEBUG - newRequest] error reading body: %s", err) + } + body = bytes.NewReader(readBody) + } + + req, err := http.NewRequest(method, reqUrl.String(), body) + if err != nil { + util.Logger.Printf("[DEBUG - newRequest] error getting new request: %s", err) + } + + if client.VCDAuthHeader != "" && client.VCDToken != "" { + // Add the authorization header + req.Header.Add(client.VCDAuthHeader, client.VCDToken) + } + if (client.VCDAuthHeader != "" && client.VCDToken != "") || + (additionalHeader != nil && additionalHeader.Get("Authorization") != "") { + // Add the Accept header for VCD + req.Header.Add("Accept", "application/*+xml;version="+apiVersion) + } + // The deprecated authorization token is 32 characters long + // The bearer token is 612 characters long + if len(client.VCDToken) > 32 { + req.Header.Add("X-Vmware-Vcloud-Token-Type", "Bearer") + req.Header.Add("Authorization", "bearer "+client.VCDToken) + } + + // Merge in additional headers before logging if any where specified in additionalHeader + // parameter + if additionalHeader != nil && len(additionalHeader) > 0 { + for headerName, headerValueSlice := range additionalHeader { + for _, singleHeaderValue := range headerValueSlice { + req.Header.Add(headerName, singleHeaderValue) + } + } + } + if client.customHeader != nil { + for k, v := range client.customHeader { + for _, v1 := range v { + req.Header.Add(k, v1) + } + } + } + + setHttpUserAgent(client.UserAgent, req) + + // Avoids passing 
data if the logging of requests is disabled + if util.LogHttpRequest { + payload := "" + if req.ContentLength > 0 { + payload = string(readBody) + } + util.ProcessRequestOutput(util.FuncNameCallStack(), method, reqUrl.String(), payload, req) + debugShowRequest(req, payload) + } + + return req + +} + +// NewRequest creates a new HTTP request and applies necessary auth headers if set. +func (client *Client) NewRequest(params map[string]string, method string, reqUrl url.URL, body io.Reader) *http.Request { + return client.NewRequestWitNotEncodedParams(params, nil, method, reqUrl, body) +} + +// NewRequestWithApiVersion creates a new HTTP request and applies necessary auth headers if set. +// Allows to override default request API Version +func (client *Client) NewRequestWithApiVersion(params map[string]string, method string, reqUrl url.URL, body io.Reader, apiVersion string) *http.Request { + return client.NewRequestWitNotEncodedParamsWithApiVersion(params, nil, method, reqUrl, body, apiVersion) +} + +// ParseErr takes an error XML resp, error interface for unmarshalling and returns a single string for +// use in error messages. +func ParseErr(bodyType types.BodyType, resp *http.Response, errType error) error { + // if there was an error decoding the body, just return that + if err := decodeBody(bodyType, resp, errType); err != nil { + util.Logger.Printf("[ParseErr]: unhandled response <--\n%+v\n-->\n", resp) + return fmt.Errorf("[ParseErr]: error parsing error body for non-200 request: %s (%+v)", err, resp) + } + + // response body maybe empty for some error, such like 416, 400 + if errType.Error() == "API Error: 0: " { + errType = fmt.Errorf(resp.Status) + } + + return errType +} + +// decodeBody is used to decode a response body of types.BodyType +func decodeBody(bodyType types.BodyType, resp *http.Response, out interface{}) error { + body, err := ioutil.ReadAll(resp.Body) + + // In case of JSON, body does not have indents in response therefore it must be indented + if bodyType == types.BodyTypeJSON { + body, err = indentJsonBody(body) + if err != nil { + return err + } + } + + util.ProcessResponseOutput(util.FuncNameCallStack(), resp, string(body)) + if err != nil { + return err + } + + debugShowResponse(resp, body) + + // only attempt to unmarshal if body is not empty + if len(body) > 0 { + switch bodyType { + case types.BodyTypeXML: + if err = xml.Unmarshal(body, &out); err != nil { + return err + } + case types.BodyTypeJSON: + if err = json.Unmarshal(body, &out); err != nil { + return err + } + + default: + panic(fmt.Sprintf("unknown body type: %d", bodyType)) + } + } + + return nil +} + +// indentJsonBody indents raw JSON body for easier readability +func indentJsonBody(body []byte) ([]byte, error) { + var prettyJSON bytes.Buffer + err := json.Indent(&prettyJSON, body, "", " ") + if err != nil { + return nil, fmt.Errorf("error indenting response JSON: %s", err) + } + body = prettyJSON.Bytes() + return body, nil +} + +// checkResp wraps http.Client.Do() and verifies the request, if status code +// is 2XX it passes back the response, if it's a known invalid status code it +// parses the resultant XML error and returns a descriptive error, if the +// status code is not handled it returns a generic error with the status code. +func checkResp(resp *http.Response, err error) (*http.Response, error) { + return checkRespWithErrType(types.BodyTypeXML, resp, err, &types.Error{}) +} + +// checkRespWithErrType allows to specify custom error errType for checkResp unmarshaling +// the error. 
+func checkRespWithErrType(bodyType types.BodyType, resp *http.Response, err, errType error) (*http.Response, error) { + if err != nil { + return resp, err + } + + switch resp.StatusCode { + // Valid request, return the response. + case + http.StatusOK, // 200 + http.StatusCreated, // 201 + http.StatusAccepted, // 202 + http.StatusNoContent, // 204 + http.StatusFound: // 302 + return resp, nil + // Invalid request, parse the XML error returned and return it. + case + http.StatusBadRequest, // 400 + http.StatusUnauthorized, // 401 + http.StatusForbidden, // 403 + http.StatusNotFound, // 404 + http.StatusMethodNotAllowed, // 405 + http.StatusNotAcceptable, // 406 + http.StatusProxyAuthRequired, // 407 + http.StatusRequestTimeout, // 408 + http.StatusConflict, // 409 + http.StatusGone, // 410 + http.StatusLengthRequired, // 411 + http.StatusPreconditionFailed, // 412 + http.StatusRequestEntityTooLarge, // 413 + http.StatusRequestURITooLong, // 414 + http.StatusUnsupportedMediaType, // 415 + http.StatusRequestedRangeNotSatisfiable, // 416 + http.StatusLocked, // 423 + http.StatusFailedDependency, // 424 + http.StatusUpgradeRequired, // 426 + http.StatusPreconditionRequired, // 428 + http.StatusTooManyRequests, // 429 + http.StatusRequestHeaderFieldsTooLarge, // 431 + http.StatusUnavailableForLegalReasons, // 451 + http.StatusInternalServerError, // 500 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout: // 504 + return nil, ParseErr(bodyType, resp, errType) + // Unhandled response. + default: + return nil, fmt.Errorf("unhandled API response, please report this issue, status code: %s", resp.Status) + } +} + +// ExecuteTaskRequest helper function creates request, runs it, checks response and parses task from response. +// pathURL - request URL +// requestType - HTTP method type +// contentType - value to set for "Content-Type" +// errorMessage - error message to return when error happens +// payload - XML struct which will be marshalled and added as body/payload +// E.g. client.ExecuteTaskRequest(updateDiskLink.HREF, http.MethodPut, updateDiskLink.Type, "error updating disk: %s", xmlPayload) +func (client *Client) ExecuteTaskRequest(pathURL, requestType, contentType, errorMessage string, payload interface{}) (Task, error) { + return client.executeTaskRequest(pathURL, requestType, contentType, errorMessage, payload, client.APIVersion) +} + +// ExecuteTaskRequestWithApiVersion helper function creates request, runs it, checks response and parses task from response. +// pathURL - request URL +// requestType - HTTP method type +// contentType - value to set for "Content-Type" +// errorMessage - error message to return when error happens +// payload - XML struct which will be marshalled and added as body/payload +// apiVersion - api version which will be used in request +// E.g. client.ExecuteTaskRequest(updateDiskLink.HREF, http.MethodPut, updateDiskLink.Type, "error updating disk: %s", xmlPayload) +func (client *Client) ExecuteTaskRequestWithApiVersion(pathURL, requestType, contentType, errorMessage string, payload interface{}, apiVersion string) (Task, error) { + return client.executeTaskRequest(pathURL, requestType, contentType, errorMessage, payload, apiVersion) +} + +// Helper function creates request, runs it, checks response and parses task from response. 
+// pathURL - request URL +// requestType - HTTP method type +// contentType - value to set for "Content-Type" +// errorMessage - error message to return when error happens +// payload - XML struct which will be marshalled and added as body/payload +// apiVersion - api version which will be used in request +// E.g. client.ExecuteTaskRequest(updateDiskLink.HREF, http.MethodPut, updateDiskLink.Type, "error updating disk: %s", xmlPayload) +func (client *Client) executeTaskRequest(pathURL, requestType, contentType, errorMessage string, payload interface{}, apiVersion string) (Task, error) { + + if !isMessageWithPlaceHolder(errorMessage) { + return Task{}, fmt.Errorf("error message has to include place holder for error") + } + + resp, err := executeRequestWithApiVersion(pathURL, requestType, contentType, payload, client, apiVersion) + if err != nil { + return Task{}, fmt.Errorf(errorMessage, err) + } + + task := NewTask(client) + + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + err = resp.Body.Close() + if err != nil { + return Task{}, fmt.Errorf(errorMessage, err) + } + + // The request was successful + return *task, nil +} + +// ExecuteRequestWithoutResponse helper function creates request, runs it, checks response and do not expect any values from it. +// pathURL - request URL +// requestType - HTTP method type +// contentType - value to set for "Content-Type" +// errorMessage - error message to return when error happens +// payload - XML struct which will be marshalled and added as body/payload +// E.g. client.ExecuteRequestWithoutResponse(catalogItemHREF.String(), http.MethodDelete, "", "error deleting Catalog item: %s", nil) +func (client *Client) ExecuteRequestWithoutResponse(pathURL, requestType, contentType, errorMessage string, payload interface{}) error { + return client.executeRequestWithoutResponse(pathURL, requestType, contentType, errorMessage, payload, client.APIVersion) +} + +// ExecuteRequestWithoutResponseWithApiVersion helper function creates request, runs it, checks response and do not expect any values from it. +// pathURL - request URL +// requestType - HTTP method type +// contentType - value to set for "Content-Type" +// errorMessage - error message to return when error happens +// payload - XML struct which will be marshalled and added as body/payload +// apiVersion - api version which will be used in request +// E.g. client.ExecuteRequestWithoutResponse(catalogItemHREF.String(), http.MethodDelete, "", "error deleting Catalog item: %s", nil) +func (client *Client) ExecuteRequestWithoutResponseWithApiVersion(pathURL, requestType, contentType, errorMessage string, payload interface{}, apiVersion string) error { + return client.executeRequestWithoutResponse(pathURL, requestType, contentType, errorMessage, payload, apiVersion) +} + +// Helper function creates request, runs it, checks response and do not expect any values from it. +// pathURL - request URL +// requestType - HTTP method type +// contentType - value to set for "Content-Type" +// errorMessage - error message to return when error happens +// payload - XML struct which will be marshalled and added as body/payload +// apiVersion - api version which will be used in request +// E.g. 
client.ExecuteRequestWithoutResponse(catalogItemHREF.String(), http.MethodDelete, "", "error deleting Catalog item: %s", nil) +func (client *Client) executeRequestWithoutResponse(pathURL, requestType, contentType, errorMessage string, payload interface{}, apiVersion string) error { + + if !isMessageWithPlaceHolder(errorMessage) { + return fmt.Errorf("error message has to include place holder for error") + } + + resp, err := executeRequestWithApiVersion(pathURL, requestType, contentType, payload, client, apiVersion) + if err != nil { + return fmt.Errorf(errorMessage, err) + } + + // log response explicitly because decodeBody() was not triggered + util.ProcessResponseOutput(util.FuncNameCallStack(), resp, fmt.Sprintf("%s", resp.Body)) + + debugShowResponse(resp, []byte("SKIPPED RESPONSE")) + err = resp.Body.Close() + if err != nil { + return fmt.Errorf("error closing response body: %s", err) + } + + // The request was successful + return nil +} + +// ExecuteRequest helper function creates request, runs it, check responses and parses out interface from response. +// pathURL - request URL +// requestType - HTTP method type +// contentType - value to set for "Content-Type" +// errorMessage - error message to return when error happens +// payload - XML struct which will be marshalled and added as body/payload +// out - structure to be used for unmarshalling xml +// E.g. unmarshalledAdminOrg := &types.AdminOrg{} +// client.ExecuteRequest(adminOrg.AdminOrg.HREF, http.MethodGet, "", "error refreshing organization: %s", nil, unmarshalledAdminOrg) +func (client *Client) ExecuteRequest(pathURL, requestType, contentType, errorMessage string, payload, out interface{}) (*http.Response, error) { + return client.executeRequest(pathURL, requestType, contentType, errorMessage, payload, out, client.APIVersion) +} + +// ExecuteRequestWithApiVersion helper function creates request, runs it, check responses and parses out interface from response. +// pathURL - request URL +// requestType - HTTP method type +// contentType - value to set for "Content-Type" +// errorMessage - error message to return when error happens +// payload - XML struct which will be marshalled and added as body/payload +// out - structure to be used for unmarshalling xml +// apiVersion - api version which will be used in request +// E.g. unmarshalledAdminOrg := &types.AdminOrg{} +// client.ExecuteRequest(adminOrg.AdminOrg.HREF, http.MethodGet, "", "error refreshing organization: %s", nil, unmarshalledAdminOrg) +func (client *Client) ExecuteRequestWithApiVersion(pathURL, requestType, contentType, errorMessage string, payload, out interface{}, apiVersion string) (*http.Response, error) { + return client.executeRequest(pathURL, requestType, contentType, errorMessage, payload, out, apiVersion) +} + +// Helper function creates request, runs it, check responses and parses out interface from response. +// pathURL - request URL +// requestType - HTTP method type +// contentType - value to set for "Content-Type" +// errorMessage - error message to return when error happens +// payload - XML struct which will be marshalled and added as body/payload +// out - structure to be used for unmarshalling xml +// apiVersion - api version which will be used in request +// E.g. 
unmarshalledAdminOrg := &types.AdminOrg{} +// client.ExecuteRequest(adminOrg.AdminOrg.HREF, http.MethodGet, "", "error refreshing organization: %s", nil, unmarshalledAdminOrg) +func (client *Client) executeRequest(pathURL, requestType, contentType, errorMessage string, payload, out interface{}, apiVersion string) (*http.Response, error) { + + if !isMessageWithPlaceHolder(errorMessage) { + return &http.Response{}, fmt.Errorf("error message has to include place holder for error") + } + + resp, err := executeRequestWithApiVersion(pathURL, requestType, contentType, payload, client, apiVersion) + if err != nil { + return resp, fmt.Errorf(errorMessage, err) + } + + if err = decodeBody(types.BodyTypeXML, resp, out); err != nil { + return resp, fmt.Errorf("error decoding response: %s", err) + } + + err = resp.Body.Close() + if err != nil { + return resp, fmt.Errorf("error closing response body: %s", err) + } + + // The request was successful + return resp, nil +} + +// ExecuteRequestWithCustomError sends the request and checks for 2xx response. If the returned status code +// was not as expected - the returned error will be unmarshalled to `errType` which implements Go's standard `error` +// interface. +func (client *Client) ExecuteRequestWithCustomError(pathURL, requestType, contentType, errorMessage string, + payload interface{}, errType error) (*http.Response, error) { + return client.ExecuteParamRequestWithCustomError(pathURL, map[string]string{}, requestType, contentType, + errorMessage, payload, errType) +} + +// ExecuteParamRequestWithCustomError behaves exactly like ExecuteRequestWithCustomError but accepts +// query parameter specification +func (client *Client) ExecuteParamRequestWithCustomError(pathURL string, params map[string]string, + requestType, contentType, errorMessage string, payload interface{}, errType error) (*http.Response, error) { + if !isMessageWithPlaceHolder(errorMessage) { + return &http.Response{}, fmt.Errorf("error message has to include place holder for error") + } + + resp, err := executeRequestCustomErr(pathURL, params, requestType, contentType, payload, client, errType, client.APIVersion) + if err != nil { + return &http.Response{}, fmt.Errorf(errorMessage, err) + } + + // read from resp.Body io.Reader for debug output if it has body + var bodyBytes []byte + if resp.Body != nil { + bodyBytes, err = ioutil.ReadAll(resp.Body) + if err != nil { + return &http.Response{}, fmt.Errorf("could not read response body: %s", err) + } + // Restore the io.ReadCloser to its original state with no-op closer + resp.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) + } + + util.ProcessResponseOutput(util.FuncNameCallStack(), resp, string(bodyBytes)) + debugShowResponse(resp, bodyBytes) + + return resp, nil +} + +// executeRequest does executeRequestCustomErr and checks for vCD errors in API response +func executeRequestWithApiVersion(pathURL, requestType, contentType string, payload interface{}, client *Client, apiVersion string) (*http.Response, error) { + return executeRequestCustomErr(pathURL, map[string]string{}, requestType, contentType, payload, client, &types.Error{}, apiVersion) +} + +// executeRequestCustomErr performs request and unmarshals API error to errType if not 2xx status was returned +func executeRequestCustomErr(pathURL string, params map[string]string, requestType, contentType string, payload interface{}, client *Client, errType error, apiVersion string) (*http.Response, error) { + requestURI, err := url.ParseRequestURI(pathURL) + if err != nil { + return 
nil, fmt.Errorf("couldn't parse path request URI '%s': %s", pathURL, err) + } + + var req *http.Request + switch { + // Only send data (and xml.Header) if the payload is actually provided to avoid sending empty body with XML header + // (some Web Application Firewalls block requests when empty XML header is set but not body provided) + case payload != nil: + marshaledXml, err := xml.MarshalIndent(payload, " ", " ") + if err != nil { + return &http.Response{}, fmt.Errorf("error marshalling xml data %s", err) + } + body := bytes.NewBufferString(xml.Header + string(marshaledXml)) + + req = client.NewRequestWithApiVersion(params, requestType, *requestURI, body, apiVersion) + + default: + req = client.NewRequestWithApiVersion(params, requestType, *requestURI, nil, apiVersion) + } + + if contentType != "" { + req.Header.Add("Content-Type", contentType) + } + + setHttpUserAgent(client.UserAgent, req) + + resp, err := client.Http.Do(req) + if err != nil { + return resp, err + } + + return checkRespWithErrType(types.BodyTypeXML, resp, err, errType) +} + +// setHttpUserAgent adds User-Agent string to HTTP request. When supplied string is empty - header will not be set +func setHttpUserAgent(userAgent string, req *http.Request) { + if userAgent != "" { + req.Header.Set("User-Agent", userAgent) + } +} + +func isMessageWithPlaceHolder(message string) bool { + err := fmt.Errorf(message, "test error") + return !strings.Contains(err.Error(), "%!(EXTRA") +} + +// combinedTaskErrorMessage is a general purpose function +// that returns the contents of the operation error and, if found, the error +// returned by the associated task +func combinedTaskErrorMessage(task *types.Task, err error) string { + extendedError := err.Error() + if task.Error != nil { + extendedError = fmt.Sprintf("operation error: %s - task error: [%d - %s] %s", + err, task.Error.MajorErrorCode, task.Error.MinorErrorCode, task.Error.Message) + } + return extendedError +} + +func takeBoolPointer(value bool) *bool { + return &value +} + +// takeIntAddress is a helper that returns the address of an `int` +func takeIntAddress(x int) *int { + return &x +} + +// takeStringPointer is a helper that returns the address of a `string` +func takeStringPointer(x string) *string { + return &x +} + +// IsUuid returns true if the identifier is a bare UUID +func IsUuid(identifier string) bool { + reUuid := regexp.MustCompile(`^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$`) + return reUuid.MatchString(identifier) +} + +// isUrn validates if supplied identifier is of URN format (e.g. urn:vcloud:nsxtmanager:09722307-aee0-4623-af95-7f8e577c9ebc) +// it checks for the following criteria: +// 1. idenfifier is not empty +// 2. identifier has 4 elements separated by ':' +// 3. 
element 1 is 'urn' and element 4 is valid UUID +func isUrn(identifier string) bool { + if identifier == "" { + return false + } + + ss := strings.Split(identifier, ":") + if len(ss) != 4 { + return false + } + + if ss[0] != "urn" && !IsUuid(ss[3]) { + return false + } + + return true +} + +// BuildUrnWithUuid helps to build valid URNs where APIs require URN format, but other API responds with UUID (or +// extracted from HREF) +func BuildUrnWithUuid(urnPrefix, uuid string) (string, error) { + if !IsUuid(uuid) { + return "", fmt.Errorf("supplied uuid '%s' is not valid UUID", uuid) + } + + urn := urnPrefix + uuid + if !isUrn(urn) { + return "", fmt.Errorf("failed building valid URN '%s'", urn) + } + + return urn, nil +} + +// takeFloatAddress is a helper that returns the address of an `float64` +func takeFloatAddress(x float64) *float64 { + return &x +} + +// SetCustomHeader adds custom HTTP header values to a client +func (client *Client) SetCustomHeader(values map[string]string) { + if len(client.customHeader) == 0 { + client.customHeader = make(http.Header) + } + for k, v := range values { + client.customHeader.Add(k, v) + } +} + +// RemoveCustomHeader remove custom header values from the client +func (client *Client) RemoveCustomHeader() { + if client.customHeader != nil { + client.customHeader = nil + } +} + +// --------------------------------------------------------------------- +// The following functions are needed to avoid strict Coverity warnings +// --------------------------------------------------------------------- + +// urlParseRequestURI returns a URL, discarding the error +func urlParseRequestURI(href string) *url.URL { + apiEndpoint, err := url.ParseRequestURI(href) + if err != nil { + util.Logger.Printf("[DEBUG - urlParseRequestURI] error parsing request URI: %s", err) + } + return apiEndpoint +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_token.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_token.go new file mode 100644 index 000000000..0b30b1594 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_token.go @@ -0,0 +1,111 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// SetApiToken behaves similarly to SetToken, with the difference that it will +// return full information about the bearer token, so that the caller can make decisions about token expiration +func (vcdClient *VCDClient) SetApiToken(org, apiToken string) (*types.ApiTokenRefresh, error) { + tokenRefresh, err := vcdClient.GetBearerTokenFromApiToken(org, apiToken) + if err != nil { + return nil, err + } + err = vcdClient.SetToken(org, BearerTokenHeader, tokenRefresh.AccessToken) + if err != nil { + return nil, err + } + return tokenRefresh, nil +} + +// GetBearerTokenFromApiToken uses an API token to retrieve a bearer token +// using the refresh token operation. 
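`SetApiToken` above is the high-level entry point for token verification: it exchanges an API (refresh) token for a bearer token via `GetBearerTokenFromApiToken` (whose body follows) and installs it on the client. A hedged sketch of a caller checking whether a token is still usable, with placeholder org and token values:

```go
// Sketch only: refreshToken is the API token under test and the org name is a placeholder.
tokenInfo, err := vcdClient.SetApiToken("cluster-org", refreshToken)
if err != nil {
	return fmt.Errorf("API token could not be exchanged for a bearer token: %s", err)
}
// On success the client is already authenticated with the bearer token;
// tokenInfo also exposes it for callers that track expiration themselves.
fmt.Println("token verified, bearer token length:", len(tokenInfo.AccessToken))
```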
+func (vcdClient *VCDClient) GetBearerTokenFromApiToken(org, token string) (*types.ApiTokenRefresh, error) { + if vcdClient.Client.APIVCDMaxVersionIs("< 36.1") { + version, err := vcdClient.Client.GetVcdFullVersion() + if err == nil { + return nil, fmt.Errorf("minimum version for API token is 10.3.1 - Version detected: %s", version.Version) + } + // If we can't get the VCD version, we return API version info + return nil, fmt.Errorf("minimum API version for API token is 36.1 - Version detected: %s", vcdClient.Client.APIVersion) + } + var userDef string + urlStr := strings.Replace(vcdClient.Client.VCDHREF.String(), "/api", "", 1) + if strings.EqualFold(org, "system") { + userDef = "provider" + } else { + userDef = fmt.Sprintf("tenant/%s", org) + } + reqUrl := fmt.Sprintf("%s/oauth/%s/token", urlStr, userDef) + reqHref, err := url.ParseRequestURI(reqUrl) + if err != nil { + return nil, fmt.Errorf("error getting request URL from %s : %s", reqUrl, err) + } + + data := bytes.NewBufferString(fmt.Sprintf("grant_type=refresh_token&refresh_token=%s", token)) + req := vcdClient.Client.NewRequest(nil, http.MethodPost, *reqHref, data) + req.Header.Add("Accept", "application/*;version=36.1") + + resp, err := vcdClient.Client.Http.Do(req) + if err != nil { + return nil, err + } + + var body []byte + var tokenDef types.ApiTokenRefresh + if resp.Body != nil { + body, err = ioutil.ReadAll(resp.Body) + } + + // The default response data to show in the logs is a string of asterisks + responseData := "[" + strings.Repeat("*", 10) + "]" + // If users request to see sensitive data, we pass the unchanged response body + if util.LogPasswords { + responseData = string(body) + } + util.ProcessResponseOutput("GetBearerTokenFromApiToken", resp, responseData) + if len(body) == 0 { + return nil, fmt.Errorf("refresh token was empty: %s", resp.Status) + } + if err != nil { + return nil, fmt.Errorf("error extracting refresh token: %s", err) + } + + err = json.Unmarshal(body, &tokenDef) + if err != nil { + return nil, fmt.Errorf("error decoding token text: %s", err) + } + if tokenDef.AccessToken == "" { + // If the access token is empty, the body should contain a composite error message. + // Attempting to decode it and return as much information as possible + var errorBody map[string]string + err2 := json.Unmarshal(body, &errorBody) + if err2 == nil { + errorMessage := "" + for k, v := range errorBody { + if v == "null" || v == "" { + continue + } + errorMessage += fmt.Sprintf("%s: %s - ", k, v) + } + return nil, fmt.Errorf("%s: %s", errorMessage, resp.Status) + } + + // If decoding the error fails, we return the raw body (possibly an unencoded internal server error) + return nil, fmt.Errorf("access token retrieved from API token was empty - %s %s", resp.Status, string(body)) + } + return &tokenDef, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_vcd.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_vcd.go new file mode 100644 index 000000000..7bc70cb41 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_vcd.go @@ -0,0 +1,310 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// VCDClientOption defines signature for customizing VCDClient using +// functional options pattern. +type VCDClientOption func(*VCDClient) error + +type VCDClient struct { + Client Client // Client for the underlying VCD instance + sessionHREF url.URL // HREF for the session API + QueryHREF url.URL // HREF for the query API +} + +func (vcdClient *VCDClient) vcdloginurl() error { + if err := vcdClient.Client.validateAPIVersion(); err != nil { + return fmt.Errorf("could not find valid version for login: %s", err) + } + + // find login address matching the API version + var neededVersion VersionInfo + for _, versionInfo := range vcdClient.Client.supportedVersions.VersionInfos { + if versionInfo.Version == vcdClient.Client.APIVersion { + neededVersion = versionInfo + break + } + } + + loginUrl, err := url.Parse(neededVersion.LoginUrl) + if err != nil { + return fmt.Errorf("couldn't find a LoginUrl for version %s", vcdClient.Client.APIVersion) + } + vcdClient.sessionHREF = *loginUrl + return nil +} + +// vcdCloudApiAuthorize performs the authorization to VCD using open API +func (vcdClient *VCDClient) vcdCloudApiAuthorize(user, pass, org string) (*http.Response, error) { + var missingItems []string + if user == "" { + missingItems = append(missingItems, "user") + } + if pass == "" { + missingItems = append(missingItems, "password") + } + if org == "" { + missingItems = append(missingItems, "org") + } + if len(missingItems) > 0 { + return nil, fmt.Errorf("authorization is not possible because of these missing items: %v", missingItems) + } + + util.Logger.Println("[TRACE] Connecting to VCD using cloudapi") + // This call can only be used by tenants + rawUrl := vcdClient.sessionHREF.Scheme + "://" + vcdClient.sessionHREF.Host + "/cloudapi/1.0.0/sessions" + + // If we are connecting as provider, we need to qualify the request. + if strings.EqualFold(org, "system") { + rawUrl += "/provider" + } + util.Logger.Printf("[TRACE] URL %s\n", rawUrl) + loginUrl, err := url.Parse(rawUrl) + if err != nil { + return nil, fmt.Errorf("error parsing URL %s", rawUrl) + } + vcdClient.sessionHREF = *loginUrl + req := vcdClient.Client.NewRequest(map[string]string{}, http.MethodPost, *loginUrl, nil) + // Set Basic Authentication Header + req.SetBasicAuth(user+"@"+org, pass) + // Add the Accept header. The version must be at least 33.0 for cloudapi to work + req.Header.Add("Accept", "application/*;version="+vcdClient.Client.APIVersion) + resp, err := vcdClient.Client.Http.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + // Store the authorization header + vcdClient.Client.VCDToken = resp.Header.Get(BearerTokenHeader) + vcdClient.Client.VCDAuthHeader = BearerTokenHeader + vcdClient.Client.IsSysAdmin = strings.EqualFold(org, "system") + // Get query href + vcdClient.QueryHREF = vcdClient.Client.VCDHREF + vcdClient.QueryHREF.Path += "/query" + return resp, nil +} + +// NewVCDClient initializes VMware vCloud Director client with reasonable defaults. +// It accepts functions of type VCDClientOption for adjusting defaults. 
+func NewVCDClient(vcdEndpoint url.URL, insecure bool, options ...VCDClientOption) *VCDClient { + // Setting defaults + vcdClient := &VCDClient{ + Client: Client{ + APIVersion: "33.0", // supported by 10.0+ + // UserAgent cannot embed exact version by default because this is source code and is supposed to be used by programs, + // but any client can customize or disable it at all using WithHttpUserAgent() configuration options function. + UserAgent: "go-vcloud-director", + VCDHREF: vcdEndpoint, + Http: http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: insecure, + }, + Proxy: http.ProxyFromEnvironment, + TLSHandshakeTimeout: 120 * time.Second, // Default timeout for TSL hand shake + }, + Timeout: 600 * time.Second, // Default value for http request+response timeout + }, + MaxRetryTimeout: 60, // Default timeout in seconds for retries calls in functions + }, + } + + // Override defaults with functional options + for _, option := range options { + err := option(vcdClient) + if err != nil { + // We do not have error in return of this function signature. + // To avoid breaking API the only thing we can do is panic. + panic(fmt.Sprintf("unable to initialize vCD client: %s", err)) + } + } + return vcdClient +} + +// Authenticate is a helper function that performs a login in vCloud Director. +func (vcdClient *VCDClient) Authenticate(username, password, org string) error { + _, err := vcdClient.GetAuthResponse(username, password, org) + return err +} + +// GetAuthResponse performs authentication and returns the full HTTP response +// The purpose of this function is to preserve information that is useful +// for token-based authentication +func (vcdClient *VCDClient) GetAuthResponse(username, password, org string) (*http.Response, error) { + // LoginUrl + err := vcdClient.vcdloginurl() + if err != nil { + return nil, fmt.Errorf("error finding LoginUrl: %s", err) + } + + // Choose correct auth mechanism based on what type of authentication is used. The end result + // for each of the below functions is to set authorization token vcdCli.Client.VCDToken. 
+ var resp *http.Response + switch { + case vcdClient.Client.UseSamlAdfs: + err = vcdClient.authorizeSamlAdfs(username, password, org, vcdClient.Client.CustomAdfsRptId) + if err != nil { + return nil, fmt.Errorf("error authorizing SAML: %s", err) + } + default: + // Authorize + resp, err = vcdClient.vcdCloudApiAuthorize(username, password, org) + if err != nil { + return nil, fmt.Errorf("error authorizing: %s", err) + } + } + + vcdClient.LogSessionInfo() + return resp, nil +} + +// SetToken will set the authorization token in the client, without using other credentials +// Up to version 29, token authorization uses the header key x-vcloud-authorization +// In version 30+ it also uses X-Vmware-Vcloud-Access-Token:TOKEN coupled with +// X-Vmware-Vcloud-Token-Type:"bearer" +func (vcdClient *VCDClient) SetToken(org, authHeader, token string) error { + if authHeader == ApiTokenHeader { + util.Logger.Printf("[DEBUG] Attempt authentication using API token") + apiToken, err := vcdClient.GetBearerTokenFromApiToken(org, token) + if err != nil { + util.Logger.Printf("[DEBUG] Authentication using API token was UNSUCCESSFUL: %s", err) + return err + } + token = apiToken.AccessToken + authHeader = BearerTokenHeader + vcdClient.Client.UsingAccessToken = true + util.Logger.Printf("[DEBUG] Authentication using API token was SUCCESSFUL") + } + if !vcdClient.Client.UsingAccessToken { + vcdClient.Client.UsingBearerToken = true + } + vcdClient.Client.VCDAuthHeader = authHeader + vcdClient.Client.VCDToken = token + + err := vcdClient.vcdloginurl() + if err != nil { + return fmt.Errorf("error finding LoginUrl: %s", err) + } + + vcdClient.Client.IsSysAdmin = strings.EqualFold(org, "system") + // Get query href + vcdClient.QueryHREF = vcdClient.Client.VCDHREF + vcdClient.QueryHREF.Path += "/query" + + // The client is now ready to connect using the token, but has not communicated with the vCD yet. + // To make sure that it is working, we run a request for the org list. + // This list should work always: when run as system administrator, it retrieves all organizations. + // When run as org user, it only returns the organization the user is authorized to. + // In both cases, we discard the list, as we only use it to certify that the token works. + orgListHREF := vcdClient.Client.VCDHREF + orgListHREF.Path += "/org" + + orgList := new(types.OrgList) + + _, err = vcdClient.Client.ExecuteRequest(orgListHREF.String(), http.MethodGet, + "", "error connecting to vCD using token: %s", nil, orgList) + if err != nil { + return err + } + vcdClient.LogSessionInfo() + return nil +} + +// Disconnect performs a disconnection from the vCloud Director API endpoint. 
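Taken together, `NewVCDClient`, `Authenticate`, and `SetToken` cover both login paths a diagnostic tool needs. The sketch below assumes placeholder host/org/token values and uses `WithHttpUserAgent`, one of the functional options defined just after this; it is an illustration, not the tool's actual code:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// verifyToken builds a client that skips TLS verification and validates an
// existing API token against the given VCD host.
func verifyToken(host, org, refreshToken string) error {
	apiURL, err := url.ParseRequestURI(host + "/api")
	if err != nil {
		return fmt.Errorf("invalid VCD host %s: %s", host, err)
	}
	// Second argument (true) skips TLS certificate verification.
	client := govcd.NewVCDClient(*apiURL, true, govcd.WithHttpUserAgent("token-log-collector"))

	// SetToken with ApiTokenHeader exchanges the API token for a bearer token and
	// then lists organizations as a sanity check, so a nil error means the token works.
	if err := client.SetToken(org, govcd.ApiTokenHeader, refreshToken); err != nil {
		return fmt.Errorf("API token verification failed: %s", err)
	}
	fmt.Println("token accepted by", host)
	return nil
}
```

For the password-based path, `client.Authenticate(user, password, org)` replaces the `SetToken` call.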
+func (vcdClient *VCDClient) Disconnect() error { + if vcdClient.Client.VCDToken == "" && vcdClient.Client.VCDAuthHeader == "" { + return fmt.Errorf("cannot disconnect, client is not authenticated") + } + req := vcdClient.Client.NewRequest(map[string]string{}, http.MethodDelete, vcdClient.sessionHREF, nil) + // Add the Accept header for vCA + req.Header.Add("Accept", "application/xml;version="+vcdClient.Client.APIVersion) + // Set Authorization Header + req.Header.Add(vcdClient.Client.VCDAuthHeader, vcdClient.Client.VCDToken) + if _, err := checkResp(vcdClient.Client.Http.Do(req)); err != nil { + return fmt.Errorf("error processing session delete for vCloud Director: %s", err) + } + return nil +} + +// WithMaxRetryTimeout allows default vCDClient MaxRetryTimeout value override +func WithMaxRetryTimeout(timeoutSeconds int) VCDClientOption { + return func(vcdClient *VCDClient) error { + vcdClient.Client.MaxRetryTimeout = timeoutSeconds + return nil + } +} + +// WithAPIVersion allows to override default API version. Please be cautious +// about changing the version as the default specified is the most tested. +func WithAPIVersion(version string) VCDClientOption { + return func(vcdClient *VCDClient) error { + vcdClient.Client.APIVersion = version + return nil + } +} + +// WithHttpTimeout allows to override default http timeout +func WithHttpTimeout(timeout int64) VCDClientOption { + return func(vcdClient *VCDClient) error { + vcdClient.Client.Http.Timeout = time.Duration(timeout) * time.Second + return nil + } +} + +// WithSamlAdfs specifies if SAML auth is used for authenticating to vCD instead of local login. +// The following conditions must be met so that SAML authentication works: +// * SAML IdP (Identity Provider) is Active Directory Federation Service (ADFS) +// * WS-Trust authentication endpoint "/adfs/services/trust/13/usernamemixed" must be enabled on +// ADFS server +// By default vCD SAML Entity ID will be used as Relaying Party Trust Identifier unless +// customAdfsRptId is specified +func WithSamlAdfs(useSaml bool, customAdfsRptId string) VCDClientOption { + return func(vcdClient *VCDClient) error { + vcdClient.Client.UseSamlAdfs = useSaml + vcdClient.Client.CustomAdfsRptId = customAdfsRptId + return nil + } +} + +// WithHttpUserAgent allows to specify HTTP user-agent which can be useful for statistics tracking. +// By default User-Agent is set to "go-vcloud-director". It can be unset by supplying empty value. +func WithHttpUserAgent(userAgent string) VCDClientOption { + return func(vcdClient *VCDClient) error { + vcdClient.Client.UserAgent = userAgent + return nil + } +} + +// WithHttpHeader allows to specify custom HTTP header values. +// Typical usage of this function is to inject a tenant context into the client. +// +// WARNING: Using this function in an environment with concurrent operations may result in negative side effects, +// such as operations as system administrator and as tenant using the same client. +// This setting is justified when we want to start a session where the additional header is always needed. 
+// For cases where we need system administrator and tenant operations in the same environment we can either +// a) use two separate clients +// or b) use the `additionalHeader` parameter in *newRequest* functions +func WithHttpHeader(options map[string]string) VCDClientOption { + return func(vcdClient *VCDClient) error { + vcdClient.Client.customHeader = make(http.Header) + for k, v := range options { + vcdClient.Client.customHeader.Add(k, v) + } + return nil + } +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_vcd_test_unit.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_vcd_test_unit.go new file mode 100644 index 000000000..c6f5c27cc --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_vcd_test_unit.go @@ -0,0 +1,48 @@ +//go:build unit || ALL +// +build unit ALL + +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "io/ioutil" + "os" + "testing" +) + +// goldenString is a test helper to manage Golden files. It supports `update` parameter which may be +// useful for writing such files (manual or automated way). +func goldenString(t *testing.T, goldenFile string, actual string, update bool) string { + t.Helper() + + goldenPath := "../test-resources/golden/" + t.Name() + "_" + goldenFile + ".golden" + + f, err := os.OpenFile(goldenPath, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + t.Fatalf("unable to find golden file '%s': %s", goldenPath, err) + } + defer f.Close() + + if update { + _, err := f.WriteString(actual) + if err != nil { + t.Fatalf("error writing to file %s: %s", goldenPath, err) + } + + return actual + } + + content, err := ioutil.ReadAll(f) + if err != nil { + t.Fatalf("error opening file %s: %s", goldenPath, err) + } + return string(content) +} + +// goldenBytes wraps goldenString and returns []byte +func goldenBytes(t *testing.T, goldenFile string, actual []byte, update bool) []byte { + return []byte(goldenString(t, goldenFile, string(actual), update)) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_vcd_versions.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_vcd_versions.go new file mode 100644 index 000000000..e49242e07 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/api_vcd_versions.go @@ -0,0 +1,355 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/http" + "regexp" + "sort" + "strings" + "time" + + "github.com/araddon/dateparse" + semver "github.com/hashicorp/go-version" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type VersionInfo struct { + Version string `xml:"Version"` + LoginUrl string `xml:"LoginUrl"` + Deprecated bool `xml:"deprecated,attr,omitempty"` +} + +type VersionInfos []VersionInfo + +type SupportedVersions struct { + VersionInfos `xml:"VersionInfo"` +} + +// VcdVersion contains the full information about a VCD version +type VcdVersion struct { + Version *semver.Version + Time time.Time +} + +// apiVersionToVcdVersion gets the vCD version from max supported API version +var apiVersionToVcdVersion = map[string]string{ + "29.0": "9.0", + "30.0": "9.1", + "31.0": "9.5", + "32.0": "9.7", + "33.0": "10.0", + "34.0": "10.1", + "35.0": "10.2", + "36.0": "10.3", // Provisional version for non-GA release. It may change later +} + +// vcdVersionToApiVersion gets the max supported API version from vCD version +var vcdVersionToApiVersion = map[string]string{ + "9.0": "29.0", + "9.1": "30.0", + "9.5": "31.0", + "9.7": "32.0", + "10.0": "33.0", + "10.1": "34.0", + "10.2": "35.0", + "10.3": "36.0", // Provisional version for non-GA release. It may change later +} + +// to make vcdVersionToApiVersion used +var _ = vcdVersionToApiVersion + +// APIVCDMaxVersionIs compares against maximum vCD supported API version from /api/versions (not necessarily +// the currently used one). This allows to check what is the maximum API version that vCD instance +// supports and can be used to guess vCD product version. API 31.0 support was first introduced in +// vCD 9.5 (as per https://code.vmware.com/doc/preview?id=8072). Therefore APIMaxVerIs(">= 31.0") +// implies that you have vCD 9.5 or newer running inside. +// It does not require for the client to be authenticated. +// +// Format: ">= 27.0, < 32.0", ">= 30.0", "= 27.0" +// +// vCD version mapping to API version support https://code.vmware.com/doc/preview?id=8072 +func (client *Client) APIVCDMaxVersionIs(versionConstraint string) bool { + err := client.vcdFetchSupportedVersions() + if err != nil { + util.Logger.Printf("[ERROR] could not retrieve supported versions: %s", err) + return false + } + + util.Logger.Printf("[TRACE] checking max API version against constraints '%s'", versionConstraint) + maxVersion, err := client.MaxSupportedVersion() + if err != nil { + util.Logger.Printf("[ERROR] unable to find max supported version : %s", err) + return false + } + + isSupported, err := client.apiVersionMatchesConstraint(maxVersion, versionConstraint) + if err != nil { + util.Logger.Printf("[ERROR] unable to find max supported version : %s", err) + return false + } + + return isSupported +} + +// APIClientVersionIs allows to compare against currently used API version VCDClient.Client.APIVersion. +// Can be useful to validate if a certain feature can be used or not. +// It does not require for the client to be authenticated. 
+// +// Format: ">= 27.0, < 32.0", ">= 30.0", "= 27.0" +// +// vCD version mapping to API version support https://code.vmware.com/doc/preview?id=8072 +func (client *Client) APIClientVersionIs(versionConstraint string) bool { + + util.Logger.Printf("[TRACE] checking current API version against constraints '%s'", versionConstraint) + + isSupported, err := client.apiVersionMatchesConstraint(client.APIVersion, versionConstraint) + if err != nil { + util.Logger.Printf("[ERROR] unable to find supported version : %s", err) + return false + } + + return isSupported +} + +// vcdFetchSupportedVersions retrieves list of supported versions from +// /api/versions endpoint and stores them in VCDClient for future uses. +// It only does it once. +func (client *Client) vcdFetchSupportedVersions() error { + // Only fetch /versions if it is not stored already + numVersions := len(client.supportedVersions.VersionInfos) + if numVersions > 0 { + util.Logger.Printf("[TRACE] skipping fetch of versions because %d are stored", numVersions) + return nil + } + + apiEndpoint := client.VCDHREF + apiEndpoint.Path += "/versions" + + suppVersions := new(SupportedVersions) + _, err := client.ExecuteRequest(apiEndpoint.String(), http.MethodGet, + "", "error fetching versions: %s", nil, suppVersions) + + client.supportedVersions = *suppVersions + + // Log all supported API versions in one line to help identify vCD version from logs + allApiVersions := make([]string, len(client.supportedVersions.VersionInfos)) + for versionIndex, version := range client.supportedVersions.VersionInfos { + allApiVersions[versionIndex] = version.Version + } + util.Logger.Printf("[DEBUG] supported API versions : %s", strings.Join(allApiVersions, ",")) + + return err +} + +// MaxSupportedVersion parses supported version list and returns the highest version in string format. +func (client *Client) MaxSupportedVersion() (string, error) { + versions := make([]*semver.Version, len(client.supportedVersions.VersionInfos)) + for index, versionInfo := range client.supportedVersions.VersionInfos { + version, err := semver.NewVersion(versionInfo.Version) + if err != nil { + return "", fmt.Errorf("error parsing version %s: %s", versionInfo.Version, err) + } + versions[index] = version + } + // Sort supported versions in order lowest-highest + sort.Sort(semver.Collection(versions)) + + switch { + case len(versions) > 1: + return versions[len(versions)-1].Original(), nil + case len(versions) == 1: + return versions[0].Original(), nil + default: + return "", fmt.Errorf("could not identify supported versions") + } +} + +// vcdCheckSupportedVersion checks if there is at least one specified version exactly matching listed ones. +// Format example "27.0" +func (client *Client) vcdCheckSupportedVersion(version string) error { + return client.checkSupportedVersionConstraint(fmt.Sprintf("= %s", version)) +} + +// Checks if there is at least one specified version matching the list returned by vCD. +// Constraint format can be in format ">= 27.0, < 32",">= 30" ,"= 27.0". 
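These two checks are what other parts of the package use to gate features on API level; for instance, the API-token exchange earlier requires at least API 36.1 (VCD 10.3.1). A hedged sketch of both forms:

```go
// Sketch only: client is an initialized *govcd.Client (VCDClient.Client).
// Gate the API-token refresh flow on the instance's maximum supported API version.
if !client.APIVCDMaxVersionIs(">= 36.1") {
	return fmt.Errorf("API tokens require VCD 10.3.1 or newer (API 36.1)")
}
// The currently negotiated client API version can be checked the same way.
if client.APIClientVersionIs("< 36.0") {
	util.Logger.Printf("[DEBUG] client is pinned to an API version below 36.0")
}
```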
+func (client *Client) checkSupportedVersionConstraint(versionConstraint string) error { + for _, versionInfo := range client.supportedVersions.VersionInfos { + versionMatch, err := client.apiVersionMatchesConstraint(versionInfo.Version, versionConstraint) + if err != nil { + return fmt.Errorf("cannot match version: %s", err) + } + + if versionMatch { + return nil + } + } + return fmt.Errorf("version %s is not supported", versionConstraint) +} + +func (client *Client) apiVersionMatchesConstraint(version, versionConstraint string) (bool, error) { + + checkVer, err := semver.NewVersion(version) + if err != nil { + return false, fmt.Errorf("[ERROR] unable to parse version %s : %s", version, err) + } + // Create a provided constraint to check against current max version + constraints, err := semver.NewConstraint(versionConstraint) + if err != nil { + return false, fmt.Errorf("[ERROR] unable to parse given version constraint '%s' : %s", versionConstraint, err) + } + if constraints.Check(checkVer) { + util.Logger.Printf("[INFO] API version %s satisfies constraints '%s'", checkVer, constraints) + return true, nil + } + + util.Logger.Printf("[TRACE] API version %s does not satisfy constraints '%s'", checkVer, constraints) + return false, nil +} + +// validateAPIVersion fetches API versions +func (client *Client) validateAPIVersion() error { + err := client.vcdFetchSupportedVersions() + if err != nil { + return fmt.Errorf("could not retrieve supported versions: %s", err) + } + + // Check if version is supported + err = client.vcdCheckSupportedVersion(client.APIVersion) + if err != nil { + return fmt.Errorf("API version %s is not supported: %s", client.APIVersion, err) + } + + return nil +} + +// GetSpecificApiVersionOnCondition returns default version or wantedApiVersion if it is connected to version +// described in vcdApiVersionCondition +// f.e. 
values ">= 32.0", "32.0" returns 32.0 if vCD version is above or 9.7 +func (client *Client) GetSpecificApiVersionOnCondition(vcdApiVersionCondition, wantedApiVersion string) string { + apiVersion := client.APIVersion + if client.APIVCDMaxVersionIs(vcdApiVersionCondition) { + apiVersion = wantedApiVersion + } + return apiVersion +} + +// GetVcdVersion finds the VCD version and the time of build +func (client *Client) GetVcdVersion() (string, time.Time, error) { + + path := client.VCDHREF + path.Path += "/admin" + var admin types.VCloud + _, err := client.ExecuteRequest(path.String(), http.MethodGet, + "", "error retrieving admin info: %s", nil, &admin) + if err != nil { + return "", time.Time{}, err + } + description := admin.Description + + if description == "" { + return "", time.Time{}, fmt.Errorf("no version information found") + } + reVersion := regexp.MustCompile(`^\s*(\S+)\s+(.*)`) + + versionList := reVersion.FindAllStringSubmatch(description, -1) + + if len(versionList) == 0 || len(versionList[0]) < 2 { + return "", time.Time{}, fmt.Errorf("error getting version information from description %s", description) + } + version := versionList[0][1] + versionDate := versionList[0][2] + versionTime, err := dateparse.ParseStrict(versionDate) + if err != nil { + return "", time.Time{}, fmt.Errorf("[version %s] could not convert date %s to formal date: %s", version, versionDate, err) + } + + return version, versionTime, nil +} + +// GetVcdShortVersion returns the VCD version (three digits, no build info) +func (client *Client) GetVcdShortVersion() (string, error) { + + vcdVersion, err := client.GetVcdFullVersion() + if err != nil { + return "", fmt.Errorf("error getting version digits: %s", err) + } + digits := vcdVersion.Version.Segments() + return fmt.Sprintf("%d.%d.%d", digits[0], digits[1], digits[2]), nil +} + +// GetVcdFullVersion returns the full VCD version information as a structure +func (client *Client) GetVcdFullVersion() (VcdVersion, error) { + var vcdVersion VcdVersion + version, versionTime, err := client.GetVcdVersion() + if err != nil { + return VcdVersion{}, err + } + + vcdVersion.Version, err = semver.NewVersion(version) + if err != nil { + return VcdVersion{}, err + } + if len(vcdVersion.Version.Segments()) < 4 { + return VcdVersion{}, fmt.Errorf("error getting version digits from version %s", version) + } + vcdVersion.Time = versionTime + return vcdVersion, nil +} + +// intListToVersion converts a list of integers into a dot-separated string +func intListToVersion(digits []int, atMost int) string { + result := "" + for i, digit := range digits { + if result != "" { + result += "." + } + if i >= atMost { + result += "0" + } else { + result += fmt.Sprintf("%d", digit) + } + } + return result +} + +// VersionEqualOrGreater return true if the current version is the same or greater than the one being compared. +// If howManyDigits is > 3, the comparison includes the build. 
+// Examples: +// client version is 1.2.3.1234 +// compare version is 1.2.3.2000 +// function return true if howManyDigits is <= 3, but false if howManyDigits is > 3 +// +// client version is 1.2.3.1234 +// compare version is 1.1.1.0 +// function returns true regardless of value of howManyDigits +func (client *Client) VersionEqualOrGreater(compareTo string, howManyDigits int) (bool, error) { + + fullVersion, err := client.GetVcdFullVersion() + if err != nil { + return false, err + } + compareToVersion, err := semver.NewVersion(compareTo) + if err != nil { + return false, err + } + if howManyDigits < 4 { + currentString := intListToVersion(fullVersion.Version.Segments(), howManyDigits) + compareToString := intListToVersion(compareToVersion.Segments(), howManyDigits) + fullVersion.Version, err = semver.NewVersion(currentString) + if err != nil { + return false, err + } + compareToVersion, err = semver.NewVersion(compareToString) + if err != nil { + return false, err + } + } + + return fullVersion.Version.GreaterThanOrEqual(compareToVersion), nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/catalog.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/catalog.go new file mode 100644 index 000000000..5b053daa3 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/catalog.go @@ -0,0 +1,838 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +const ( + defaultPieceSize int64 = 1024 * 1024 +) + +type Catalog struct { + Catalog *types.Catalog + client *Client + parent organization +} + +func NewCatalog(client *Client) *Catalog { + return &Catalog{ + Catalog: new(types.Catalog), + client: client, + } +} + +// Delete deletes the Catalog, returning an error if the vCD call fails. +// Link to API call: https://code.vmware.com/apis/1046/vmware-cloud-director/doc/doc/operations/DELETE-Catalog.html +func (catalog *Catalog) Delete(force, recursive bool) error { + + adminCatalogHREF := catalog.client.VCDHREF + catalogID, err := getBareEntityUuid(catalog.Catalog.ID) + if err != nil { + return err + } + if catalogID == "" { + return fmt.Errorf("empty ID returned for catalog %s", catalog.Catalog.Name) + } + adminCatalogHREF.Path += "/admin/catalog/" + catalogID + + req := catalog.client.NewRequest(map[string]string{ + "force": strconv.FormatBool(force), + "recursive": strconv.FormatBool(recursive), + }, http.MethodDelete, adminCatalogHREF, nil) + + resp, err := checkResp(catalog.client.Http.Do(req)) + if err != nil { + return fmt.Errorf("error deleting Catalog %s: %s", catalog.Catalog.Name, err) + } + task := NewTask(catalog.client) + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return fmt.Errorf("error decoding task response: %s", err) + } + if task.Task.Status == "error" { + return fmt.Errorf(combinedTaskErrorMessage(task.Task, fmt.Errorf("catalog %s not properly destroyed", catalog.Catalog.Name))) + } + return task.WaitTaskCompletion() +} + +// Envelope is a ovf description root element. File contains information for vmdk files. 
+// Namespace: http://schemas.dmtf.org/ovf/envelope/1 +// Description: Envelope is a ovf description root element. File contains information for vmdk files.. +type Envelope struct { + File []struct { + HREF string `xml:"href,attr"` + ID string `xml:"id,attr"` + Size int `xml:"size,attr"` + ChunkSize int `xml:"chunkSize,attr"` + } `xml:"References>File"` +} + +// If catalog item is a valid CatalogItem and the call succeeds, +// then the function returns a CatalogItem. If the item does not +// exist, then it returns an empty CatalogItem. If the call fails +// at any point, it returns an error. +// Deprecated: use GetCatalogItemByName instead +func (cat *Catalog) FindCatalogItem(catalogItemName string) (CatalogItem, error) { + for _, catalogItems := range cat.Catalog.CatalogItems { + for _, catalogItem := range catalogItems.CatalogItem { + if catalogItem.Name == catalogItemName && catalogItem.Type == "application/vnd.vmware.vcloud.catalogItem+xml" { + + cat := NewCatalogItem(cat.client) + + _, err := cat.client.ExecuteRequest(catalogItem.HREF, http.MethodGet, + "", "error retrieving catalog: %s", nil, cat.CatalogItem) + return *cat, err + } + } + } + + return CatalogItem{}, nil +} + +// UploadOvf uploads an ova/ovf file to a catalog. This method only uploads bits to vCD spool area. +// ovaFileName should be the path of OVA or OVF file(not ovf folder) itself. For OVF, +// user need to make sure all the files that OVF depends on exist and locate under the same folder. +// Returns errors if any occur during upload from vCD or upload process. On upload fail client may need to +// remove vCD catalog item which waits for files to be uploaded. Files from ova are extracted to system +// temp folder "govcd+random number" and left for inspection on error. +func (cat *Catalog) UploadOvf(ovaFileName, itemName, description string, uploadPieceSize int64) (UploadTask, error) { + + // On a very high level the flow is as follows + // 1. Makes a POST call to vCD to create the catalog item (also creates a transfer folder in the spool area and as result will give a sparse catalog item resource XML). + // 2. Wait for the links to the transfer folder to appear in the resource representation of the catalog item. + // 3. Start uploading bits to the transfer folder + // 4. Wait on the import task to finish on vCD side -> task success = upload complete + + if *cat == (Catalog{}) { + return UploadTask{}, errors.New("catalog can not be empty or nil") + } + + ovaFileName, err := validateAndFixFilePath(ovaFileName) + if err != nil { + return UploadTask{}, err + } + + for _, catalogItemName := range getExistingCatalogItems(cat) { + if catalogItemName == itemName { + return UploadTask{}, fmt.Errorf("catalog item '%s' already exists. Upload with different name", itemName) + } + } + + isOvf := false + fileContentType, err := util.GetFileContentType(ovaFileName) + if err != nil { + return UploadTask{}, err + } + if strings.Contains(fileContentType, "text/xml") { + isOvf = true + } + ovfFilePath := ovaFileName + tmpDir := path.Dir(ovaFileName) + filesAbsPaths := []string{ovfFilePath} + if !isOvf { + filesAbsPaths, tmpDir, err = util.Unpack(ovaFileName) + if err != nil { + return UploadTask{}, fmt.Errorf("%s. Unpacked files for checking are accessible in: %s", err, tmpDir) + } + ovfFilePath, err = getOvfPath(filesAbsPaths) + if err != nil { + return UploadTask{}, fmt.Errorf("%s. 
Unpacked files for checking are accessible in: %s", err, tmpDir) + } + } + + ovfFileDesc, err := getOvf(ovfFilePath) + if err != nil { + return UploadTask{}, fmt.Errorf("%s. OVF/Unpacked files for checking are accessible in: %s", err, tmpDir) + } + + if !isOvf { + err = validateOvaContent(filesAbsPaths, &ovfFileDesc, tmpDir) + if err != nil { + return UploadTask{}, fmt.Errorf("%s. Unpacked files for checking are accessible in: %s", err, tmpDir) + } + } else { + dir := path.Dir(ovfFilePath) + for _, fileItem := range ovfFileDesc.File { + dependFile := path.Join(dir, fileItem.HREF) + dependFile, err := validateAndFixFilePath(dependFile) + if err != nil { + return UploadTask{}, err + } + filesAbsPaths = append(filesAbsPaths, dependFile) + } + } + + catalogItemUploadURL, err := findCatalogItemUploadLink(cat, "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml") + if err != nil { + return UploadTask{}, err + } + + vappTemplateUrl, err := createItemForUpload(cat.client, catalogItemUploadURL, itemName, description) + if err != nil { + return UploadTask{}, err + } + + vappTemplate, err := queryVappTemplate(cat.client, vappTemplateUrl, itemName) + if err != nil { + return UploadTask{}, err + } + + ovfUploadHref, err := getUploadLink(vappTemplate.Files) + if err != nil { + return UploadTask{}, err + } + + err = uploadOvfDescription(cat.client, ovfFilePath, ovfUploadHref) + if err != nil { + removeCatalogItemOnError(cat.client, vappTemplateUrl, itemName) + return UploadTask{}, err + } + + vappTemplate, err = waitForTempUploadLinks(cat.client, vappTemplateUrl, itemName) + if err != nil { + removeCatalogItemOnError(cat.client, vappTemplateUrl, itemName) + return UploadTask{}, err + } + + progressCallBack, uploadProgress := getProgressCallBackFunction() + + uploadError := *new(error) + + // sending upload process to background, this allows not to lock and return task to client + // The error should be captured in uploadError, but just in case, we add a logging for the + // main error + go func() { + err = uploadFiles(cat.client, vappTemplate, &ovfFileDesc, tmpDir, filesAbsPaths, uploadPieceSize, progressCallBack, &uploadError, isOvf) + if err != nil { + util.Logger.Println(strings.Repeat("*", 80)) + util.Logger.Printf("*** [DEBUG - UploadOvf] error calling uploadFiles: %s\n", err) + util.Logger.Println(strings.Repeat("*", 80)) + } + }() + + var task Task + for _, item := range vappTemplate.Tasks.Task { + task, err = createTaskForVcdImport(cat.client, item.HREF) + if err != nil { + removeCatalogItemOnError(cat.client, vappTemplateUrl, itemName) + return UploadTask{}, err + } + if task.Task.Status == "error" { + removeCatalogItemOnError(cat.client, vappTemplateUrl, itemName) + return UploadTask{}, fmt.Errorf("task did not complete succesfully: %s", task.Task.Description) + } + } + + uploadTask := NewUploadTask(&task, uploadProgress, &uploadError) + + util.Logger.Printf("[TRACE] Upload finished and task for vcd import created. \n") + + return *uploadTask, nil +} + +// Upload files for vCD created upload links. Different approach then vmdk file are +// chunked (e.g. test.vmdk.000000000, test.vmdk.000000001 or test.vmdk). vmdk files are chunked if +// in description file attribute ChunkSize is not zero. 
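+// Note: files whose OVF descriptor entry has a non-zero ChunkSize are uploaded chunk by chunk through
+// uploadMultiPartFile; all other files go through a single uploadFile call.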
+// params: +// client - client for requests +// vappTemplate - parsed from response vApp template +// ovfFileDesc - parsed from xml part containing ova files definition +// tempPath - path where extracted files are +// filesAbsPaths - array of extracted files +// uploadPieceSize - size of chunks in which the file will be uploaded to the catalog. +// callBack a function with signature //function(bytesUpload, totalSize) to let the caller monitor progress of the upload operation. +// uploadError - error to be ready be task +func uploadFiles(client *Client, vappTemplate *types.VAppTemplate, ovfFileDesc *Envelope, tempPath string, filesAbsPaths []string, uploadPieceSize int64, progressCallBack func(bytesUpload, totalSize int64), uploadError *error, isOvf bool) error { + var uploadedBytes int64 + for _, item := range vappTemplate.Files.File { + if item.BytesTransferred == 0 { + number, err := getFileFromDescription(item.Name, ovfFileDesc) + if err != nil { + util.Logger.Printf("[Error] Error uploading files: %#v", err) + *uploadError = err + return err + } + if ovfFileDesc.File[number].ChunkSize != 0 { + chunkFilePaths := getChunkedFilePaths(tempPath, ovfFileDesc.File[number].HREF, ovfFileDesc.File[number].Size, ovfFileDesc.File[number].ChunkSize) + details := uploadDetails{ + uploadLink: item.Link[0].HREF, + uploadedBytes: uploadedBytes, + fileSizeToUpload: int64(ovfFileDesc.File[number].Size), + uploadPieceSize: uploadPieceSize, + uploadedBytesForCallback: uploadedBytes, + allFilesSize: getAllFileSizeSum(ovfFileDesc), + callBack: progressCallBack, + uploadError: uploadError, + } + tempVar, err := uploadMultiPartFile(client, chunkFilePaths, details) + if err != nil { + util.Logger.Printf("[Error] Error uploading files: %#v", err) + *uploadError = err + return err + } + uploadedBytes += tempVar + } else { + details := uploadDetails{ + uploadLink: item.Link[0].HREF, + uploadedBytes: 0, + fileSizeToUpload: item.Size, + uploadPieceSize: uploadPieceSize, + uploadedBytesForCallback: uploadedBytes, + allFilesSize: getAllFileSizeSum(ovfFileDesc), + callBack: progressCallBack, + uploadError: uploadError, + } + tempVar, err := uploadFile(client, findFilePath(filesAbsPaths, item.Name), details) + if err != nil { + util.Logger.Printf("[Error] Error uploading files: %#v", err) + *uploadError = err + return err + } + uploadedBytes += tempVar + } + } + } + + //remove extracted files with temp dir + //If isOvf flag is true, means tempPath is origin OVF folder, not extracted, won't delete + if !isOvf { + err := os.RemoveAll(tempPath) + if err != nil { + util.Logger.Printf("[Error] Error removing temporary files: %#v", err) + *uploadError = err + return err + } + } + uploadError = nil + return nil +} + +func getFileFromDescription(fileToFind string, ovfFileDesc *Envelope) (int, error) { + for fileInArray, item := range ovfFileDesc.File { + if item.HREF == fileToFind { + util.Logger.Printf("[TRACE] getFileFromDescription - found matching file: %s in array: %d\n", fileToFind, fileInArray) + return fileInArray, nil + } + } + return -1, errors.New("file expected from vcd didn't match any description file") +} + +func getAllFileSizeSum(ovfFileDesc *Envelope) (sizeSum int64) { + sizeSum = 0 + for _, item := range ovfFileDesc.File { + sizeSum += int64(item.Size) + } + return +} + +// Uploads chunked ova file for vCD created upload link. 
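+// Returns the total number of bytes uploaded across all chunk files.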
+// params: +// client - client for requests +// vappTemplate - parsed from response vApp template +// filePaths - all chunked vmdk file paths +// uploadDetails - file upload settings and data +func uploadMultiPartFile(client *Client, filePaths []string, uDetails uploadDetails) (int64, error) { + util.Logger.Printf("[TRACE] Upload multi part file: %v\n, href: %s, size: %v", filePaths, uDetails.uploadLink, uDetails.fileSizeToUpload) + + var uploadedBytes int64 + + for i, filePath := range filePaths { + util.Logger.Printf("[TRACE] Uploading file: %v\n", i+1) + uDetails.uploadedBytesForCallback += uploadedBytes // previous files uploaded size plus current upload size + uDetails.uploadedBytes = uploadedBytes + tempVar, err := uploadFile(client, filePath, uDetails) + if err != nil { + return uploadedBytes, err + } + uploadedBytes += tempVar + } + return uploadedBytes, nil +} + +// Function waits until vCD provides temporary file upload links. +func waitForTempUploadLinks(client *Client, vappTemplateUrl *url.URL, newItemName string) (*types.VAppTemplate, error) { + var vAppTemplate *types.VAppTemplate + var err error + for { + util.Logger.Printf("[TRACE] Sleep... for 5 seconds.\n") + time.Sleep(time.Second * 5) + vAppTemplate, err = queryVappTemplate(client, vappTemplateUrl, newItemName) + if err != nil { + return nil, err + } + if vAppTemplate.Files != nil && len(vAppTemplate.Files.File) > 1 { + util.Logger.Printf("[TRACE] upload link prepared.\n") + break + } + } + return vAppTemplate, nil +} + +func queryVappTemplate(client *Client, vappTemplateUrl *url.URL, newItemName string) (*types.VAppTemplate, error) { + util.Logger.Printf("[TRACE] Querying vapp template: %s\n", vappTemplateUrl) + + vappTemplateParsed := &types.VAppTemplate{} + + _, err := client.ExecuteRequest(vappTemplateUrl.String(), http.MethodGet, + "", "error querying vApp template: %s", nil, vappTemplateParsed) + if err != nil { + return nil, err + } + + for _, task := range vappTemplateParsed.Tasks.Task { + if task.Status == "error" && newItemName == task.Owner.Name { + util.Logger.Printf("[Error] %#v", task.Error) + return vappTemplateParsed, fmt.Errorf("error in vcd returned error code: %d, error: %s and message: %s ", task.Error.MajorErrorCode, task.Error.MinorErrorCode, task.Error.Message) + } + } + + return vappTemplateParsed, nil +} + +// Uploads ovf description file from unarchived provided ova file. As a result vCD will generate temporary upload links which has to be queried later. +// Function will return parsed part for upload files from description xml. +func uploadOvfDescription(client *Client, ovfFile string, ovfUploadUrl *url.URL) error { + util.Logger.Printf("[TRACE] Uploding ovf description with file: %s and url: %s\n", ovfFile, ovfUploadUrl) + // #nosec G304 - linter does not like 'filePath' to be a variable. However this is necessary for file uploads. 
+ openedFile, err := os.Open(ovfFile) + if err != nil { + return err + } + + var buf bytes.Buffer + ovfReader := io.TeeReader(openedFile, &buf) + + request := client.NewRequest(map[string]string{}, http.MethodPut, *ovfUploadUrl, ovfReader) + request.Header.Add("Content-Type", "text/xml") + + _, err = checkResp(client.Http.Do(request)) + if err != nil { + return err + } + + err = openedFile.Close() + if err != nil { + util.Logger.Printf("[Error] Error closing file: %#v", err) + return err + } + + return nil +} + +func parseOvfFileDesc(file *os.File, ovfFileDesc *Envelope) error { + ovfXml, err := ioutil.ReadAll(file) + if err != nil { + return err + } + + err = xml.Unmarshal(ovfXml, &ovfFileDesc) + if err != nil { + return err + } + return nil +} + +func findCatalogItemUploadLink(catalog *Catalog, applicationType string) (*url.URL, error) { + for _, item := range catalog.Catalog.Link { + if item.Type == applicationType && item.Rel == "add" { + util.Logger.Printf("[TRACE] Found Catalong link for upload: %s\n", item.HREF) + + uploadURL, err := url.ParseRequestURI(item.HREF) + if err != nil { + return nil, err + } + + util.Logger.Printf("[TRACE] findCatalogItemUploadLink - catalog item upload url found: %s \n", uploadURL) + return uploadURL, nil + } + } + return nil, errors.New("catalog upload URL not found") +} + +func getExistingCatalogItems(catalog *Catalog) (catalogItemNames []string) { + for _, catalogItems := range catalog.Catalog.CatalogItems { + for _, catalogItem := range catalogItems.CatalogItem { + catalogItemNames = append(catalogItemNames, catalogItem.Name) + } + } + return +} + +func findFilePath(filesAbsPaths []string, fileName string) string { + for _, item := range filesAbsPaths { + _, file := filepath.Split(item) + if file == fileName { + return item + } + } + return "" +} + +// Initiates creation of item and returns ovf upload url for created item. +func createItemForUpload(client *Client, createHREF *url.URL, catalogItemName string, itemDescription string) (*url.URL, error) { + util.Logger.Printf("[TRACE] createItemForUpload: %s, item name: %v, description: %v \n", createHREF, catalogItemName, itemDescription) + reqBody := bytes.NewBufferString( + "" + + "" + itemDescription + "" + + "") + + request := client.NewRequest(map[string]string{}, http.MethodPost, *createHREF, reqBody) + request.Header.Add("Content-Type", "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml") + + response, err := checkResp(client.Http.Do(request)) + if err != nil { + return nil, err + } + defer response.Body.Close() + + catalogItemParsed := &types.CatalogItem{} + if err = decodeBody(types.BodyTypeXML, response, catalogItemParsed); err != nil { + return nil, err + } + + util.Logger.Printf("[TRACE] Catalog item parsed: %#v\n", catalogItemParsed) + + ovfUploadUrl, err := url.ParseRequestURI(catalogItemParsed.Entity.HREF) + if err != nil { + return nil, err + } + + return ovfUploadUrl, nil +} + +// Helper method to get path to multi-part files. +//For example a file called test.vmdk with total_file_size = 100 bytes and part_size = 40 bytes, implies the file is made of *3* part files. 
+// - test.vmdk.000000000 = 40 bytes +// - test.vmdk.000000001 = 40 bytes +// - test.vmdk.000000002 = 20 bytes +//Say base_dir = /dummy_path/, and base_file_name = test.vmdk then +//the output of this function will be [/dummy_path/test.vmdk.000000000, +// /dummy_path/test.vmdk.000000001, /dummy_path/test.vmdk.000000002] +func getChunkedFilePaths(baseDir, baseFileName string, totalFileSize, partSize int) []string { + var filePaths []string + numbParts := math.Ceil(float64(totalFileSize) / float64(partSize)) + for i := 0; i < int(numbParts); i++ { + temp := "000000000" + strconv.Itoa(i) + postfix := temp[len(temp)-9:] + filePath := path.Join(baseDir, baseFileName+"."+postfix) + filePaths = append(filePaths, filePath) + } + + util.Logger.Printf("[TRACE] Chunked files file paths: %s \n", filePaths) + return filePaths +} + +func getOvfPath(filesAbsPaths []string) (string, error) { + for _, filePath := range filesAbsPaths { + if filepath.Ext(filePath) == ".ovf" { + return filePath, nil + } + } + return "", errors.New("ova is not correct - missing ovf file") +} + +func getOvf(ovfFilePath string) (Envelope, error) { + // #nosec G304 - linter does not like 'filePath' to be a variable. However this is necessary for file uploads. + openedFile, err := os.Open(ovfFilePath) + if err != nil { + return Envelope{}, err + } + + var ovfFileDesc Envelope + err = parseOvfFileDesc(openedFile, &ovfFileDesc) + if err != nil { + return Envelope{}, err + } + + err = openedFile.Close() + if err != nil { + util.Logger.Printf("[Error] Error closing file: %#v", err) + return Envelope{}, err + } + + return ovfFileDesc, nil +} + +func validateOvaContent(filesAbsPaths []string, ovfFileDesc *Envelope, tempPath string) error { + for _, fileDescription := range ovfFileDesc.File { + if fileDescription.ChunkSize == 0 { + err := checkIfFileMatchesDescription(filesAbsPaths, fileDescription) + if err != nil { + return err + } + // check chunked ova content + } else { + chunkFilePaths := getChunkedFilePaths(tempPath, fileDescription.HREF, fileDescription.Size, fileDescription.ChunkSize) + for part, chunkedFilePath := range chunkFilePaths { + _, fileName := filepath.Split(chunkedFilePath) + chunkedFileSize := fileDescription.Size - part*fileDescription.ChunkSize + if chunkedFileSize > fileDescription.ChunkSize { + chunkedFileSize = fileDescription.ChunkSize + } + chunkedFileDescription := struct { + HREF string `xml:"href,attr"` + ID string `xml:"id,attr"` + Size int `xml:"size,attr"` + ChunkSize int `xml:"chunkSize,attr"` + }{fileName, "", chunkedFileSize, fileDescription.ChunkSize} + err := checkIfFileMatchesDescription(filesAbsPaths, chunkedFileDescription) + if err != nil { + return err + } + } + } + } + return nil +} + +func checkIfFileMatchesDescription(filesAbsPaths []string, fileDescription struct { + HREF string `xml:"href,attr"` + ID string `xml:"id,attr"` + Size int `xml:"size,attr"` + ChunkSize int `xml:"chunkSize,attr"` +}) error { + filePath := findFilePath(filesAbsPaths, fileDescription.HREF) + if filePath == "" { + return fmt.Errorf("file '%s' described in ovf was not found in ova", fileDescription.HREF) + } + if fileInfo, err := os.Stat(filePath); err == nil { + if fileDescription.Size > 0 && (fileInfo.Size() != int64(fileDescription.Size)) { + return fmt.Errorf("file size didn't match described in ovf: %s", filePath) + } + } else { + return err + } + return nil +} + +func removeCatalogItemOnError(client *Client, vappTemplateLink *url.URL, itemName string) { + if vappTemplateLink != nil { + 
util.Logger.Printf("[TRACE] Deleting Catalog item %v", vappTemplateLink) + + // wait for task, cancel it and catalog item will be removed. + var vAppTemplate *types.VAppTemplate + var err error + for { + util.Logger.Printf("[TRACE] Sleep... for 5 seconds.\n") + time.Sleep(time.Second * 5) + vAppTemplate, err = queryVappTemplate(client, vappTemplateLink, itemName) + if err != nil { + util.Logger.Printf("[Error] Error deleting Catalog item %s: %s", vappTemplateLink, err) + } + if len(vAppTemplate.Tasks.Task) > 0 { + util.Logger.Printf("[TRACE] Task found. Will try to cancel.\n") + break + } + } + + for _, taskItem := range vAppTemplate.Tasks.Task { + if itemName == taskItem.Owner.Name { + task := NewTask(client) + task.Task = taskItem + err = task.CancelTask() + if err != nil { + util.Logger.Printf("[ERROR] Error canceling task for catalog item upload %#v", err) + } + } + } + } else { + util.Logger.Printf("[Error] Failed to delete catalog item created with error: %v", vappTemplateLink) + } +} + +func (cat *Catalog) UploadMediaImage(mediaName, mediaDescription, filePath string, uploadPieceSize int64) (UploadTask, error) { + + if *cat == (Catalog{}) { + return UploadTask{}, errors.New("catalog can not be empty or nil") + } + + mediaFilePath, err := validateAndFixFilePath(filePath) + if err != nil { + return UploadTask{}, err + } + + isISOGood, err := verifyIso(mediaFilePath) + if err != nil || !isISOGood { + return UploadTask{}, fmt.Errorf("[ERROR] File %s isn't correct iso file: %#v", mediaFilePath, err) + } + + file, e := os.Stat(mediaFilePath) + if e != nil { + return UploadTask{}, fmt.Errorf("[ERROR] Issue finding file: %#v", e) + } + fileSize := file.Size() + + for _, catalogItemName := range getExistingCatalogItems(cat) { + if catalogItemName == mediaName { + return UploadTask{}, fmt.Errorf("media item '%s' already exists. 
Upload with different name", mediaName) + } + } + + catalogItemUploadURL, err := findCatalogItemUploadLink(cat, "application/vnd.vmware.vcloud.media+xml") + if err != nil { + return UploadTask{}, err + } + + media, err := createMedia(cat.client, catalogItemUploadURL.String(), mediaName, mediaDescription, fileSize) + if err != nil { + return UploadTask{}, fmt.Errorf("[ERROR] Issue creating media: %#v", err) + } + + createdMedia, err := queryMedia(cat.client, media.Entity.HREF, mediaName) + if err != nil { + return UploadTask{}, err + } + + return executeUpload(cat.client, createdMedia, mediaFilePath, mediaName, fileSize, uploadPieceSize) +} + +// Refresh gets a fresh copy of the catalog from vCD +func (cat *Catalog) Refresh() error { + if cat == nil || *cat == (Catalog{}) || cat.Catalog.HREF == "" { + return fmt.Errorf("cannot refresh, Object is empty or HREF is empty") + } + + refreshedCatalog := &types.Catalog{} + + _, err := cat.client.ExecuteRequest(cat.Catalog.HREF, http.MethodGet, + "", "error refreshing VDC: %s", nil, refreshedCatalog) + if err != nil { + return err + } + cat.Catalog = refreshedCatalog + + return nil +} + +// GetCatalogItemByHref finds a CatalogItem by HREF +// On success, returns a pointer to the CatalogItem structure and a nil error +// On failure, returns a nil pointer and an error +func (cat *Catalog) GetCatalogItemByHref(catalogItemHref string) (*CatalogItem, error) { + + catItem := NewCatalogItem(cat.client) + + _, err := cat.client.ExecuteRequest(catalogItemHref, http.MethodGet, + "", "error retrieving catalog item: %s", nil, catItem.CatalogItem) + if err != nil { + return nil, err + } + return catItem, nil +} + +// GetVappTemplateByHref finds a vApp template by HREF +// On success, returns a pointer to the vApp template structure and a nil error +// On failure, returns a nil pointer and an error +func (cat *Catalog) GetVappTemplateByHref(href string) (*VAppTemplate, error) { + + vappTemplate := NewVAppTemplate(cat.client) + + _, err := cat.client.ExecuteRequest(href, http.MethodGet, + "", "error retrieving catalog item: %s", nil, vappTemplate.VAppTemplate) + if err != nil { + return nil, err + } + return vappTemplate, nil +} + +// GetCatalogItemByName finds a CatalogItem by Name +// On success, returns a pointer to the CatalogItem structure and a nil error +// On failure, returns a nil pointer and an error +func (cat *Catalog) GetCatalogItemByName(catalogItemName string, refresh bool) (*CatalogItem, error) { + if refresh { + err := cat.Refresh() + if err != nil { + return nil, err + } + } + for _, catalogItems := range cat.Catalog.CatalogItems { + for _, catalogItem := range catalogItems.CatalogItem { + if catalogItem.Name == catalogItemName && catalogItem.Type == "application/vnd.vmware.vcloud.catalogItem+xml" { + return cat.GetCatalogItemByHref(catalogItem.HREF) + } + } + } + return nil, ErrorEntityNotFound +} + +// GetCatalogItemById finds a Catalog Item by ID +// On success, returns a pointer to the CatalogItem structure and a nil error +// On failure, returns a nil pointer and an error +func (cat *Catalog) GetCatalogItemById(catalogItemId string, refresh bool) (*CatalogItem, error) { + if refresh { + err := cat.Refresh() + if err != nil { + return nil, err + } + } + for _, catalogItems := range cat.Catalog.CatalogItems { + for _, catalogItem := range catalogItems.CatalogItem { + if equalIds(catalogItemId, catalogItem.ID, catalogItem.HREF) && catalogItem.Type == "application/vnd.vmware.vcloud.catalogItem+xml" { + return 
cat.GetCatalogItemByHref(catalogItem.HREF) + } + } + } + return nil, ErrorEntityNotFound +} + +// GetCatalogItemByNameOrId finds a Catalog Item by Name or ID +// On success, returns a pointer to the CatalogItem structure and a nil error +// On failure, returns a nil pointer and an error +func (cat *Catalog) GetCatalogItemByNameOrId(identifier string, refresh bool) (*CatalogItem, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return cat.GetCatalogItemByName(name, refresh) } + getById := func(id string, refresh bool) (interface{}, error) { return cat.GetCatalogItemById(id, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*CatalogItem), err +} + +// QueryMediaList retrieves a list of media items for the catalog +func (catalog *Catalog) QueryMediaList() ([]*types.MediaRecordType, error) { + typeMedia := "media" + if catalog.client.IsSysAdmin { + typeMedia = "adminMedia" + } + + filter := fmt.Sprintf("catalog==" + url.QueryEscape(catalog.Catalog.HREF)) + results, err := catalog.client.QueryWithNotEncodedParams(nil, map[string]string{"type": typeMedia, "filter": filter, "filterEncoded": "true"}) + if err != nil { + return nil, fmt.Errorf("error querying medias %s", err) + } + + mediaResults := results.Results.MediaRecord + if catalog.client.IsSysAdmin { + mediaResults = results.Results.AdminMediaRecord + } + return mediaResults, nil +} + +// getOrgInfo finds the organization to which the catalog belongs, and returns its name and ID +func (catalog *Catalog) getOrgInfo() (*TenantContext, error) { + org := catalog.parent + if org == nil { + return nil, fmt.Errorf("no parent found for catalog %s", catalog.Catalog.Name) + } + + return org.tenantContext() +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/catalogitem.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/catalogitem.go new file mode 100644 index 000000000..06f7ab2cf --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/catalogitem.go @@ -0,0 +1,128 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type CatalogItem struct { + CatalogItem *types.CatalogItem + client *Client +} + +func NewCatalogItem(cli *Client) *CatalogItem { + return &CatalogItem{ + CatalogItem: new(types.CatalogItem), + client: cli, + } +} + +func (catalogItem *CatalogItem) GetVAppTemplate() (VAppTemplate, error) { + + cat := NewVAppTemplate(catalogItem.client) + + _, err := catalogItem.client.ExecuteRequest(catalogItem.CatalogItem.Entity.HREF, http.MethodGet, + "", "error retrieving vApp template: %s", nil, cat.VAppTemplate) + + // The request was successful + return *cat, err + +} + +// Delete deletes the Catalog Item, returning an error if the vCD call fails. 
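+// Usage sketch (assuming an existing *CatalogItem named item): err := item.Delete()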
+// Link to API call: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/DELETE-CatalogItem.html +func (catalogItem *CatalogItem) Delete() error { + util.Logger.Printf("[TRACE] Deleting catalog item: %#v", catalogItem.CatalogItem) + catalogItemHREF := catalogItem.client.VCDHREF + catalogItemHREF.Path += "/catalogItem/" + catalogItem.CatalogItem.ID[23:] + + util.Logger.Printf("[TRACE] Url for deleting catalog item: %#v and name: %s", catalogItemHREF, catalogItem.CatalogItem.Name) + + return catalogItem.client.ExecuteRequestWithoutResponse(catalogItemHREF.String(), http.MethodDelete, + "", "error deleting Catalog item: %s", nil) +} + +// queryCatalogItemList returns a list of Catalog Item for the given parent +func queryCatalogItemList(client *Client, parentField, parentValue string) ([]*types.QueryResultCatalogItemType, error) { + + catalogItemType := types.QtCatalogItem + if client.IsSysAdmin { + catalogItemType = types.QtAdminCatalogItem + } + + filterText := fmt.Sprintf("%s==%s", parentField, url.QueryEscape(parentValue)) + + results, err := client.cumulativeQuery(catalogItemType, nil, map[string]string{ + "type": catalogItemType, + "filter": filterText, + }) + if err != nil { + return nil, fmt.Errorf("error querying catalog items %s", err) + } + + if client.IsSysAdmin { + return results.Results.AdminCatalogItemRecord, nil + } else { + return results.Results.CatalogItemRecord, nil + } +} + +// QueryCatalogItemList returns a list of Catalog Item for the given catalog +func (catalog *Catalog) QueryCatalogItemList() ([]*types.QueryResultCatalogItemType, error) { + return queryCatalogItemList(catalog.client, "catalog", catalog.Catalog.ID) +} + +// QueryCatalogItemList returns a list of Catalog Item for the given VDC +func (vdc *Vdc) QueryCatalogItemList() ([]*types.QueryResultCatalogItemType, error) { + return queryCatalogItemList(vdc.client, "vdc", vdc.Vdc.ID) +} + +// QueryCatalogItemList returns a list of Catalog Item for the given Admin VDC +func (vdc *AdminVdc) QueryCatalogItemList() ([]*types.QueryResultCatalogItemType, error) { + return queryCatalogItemList(vdc.client, "vdc", vdc.AdminVdc.ID) +} + +// queryVappTemplateList returns a list of vApp templates for the given parent +func queryVappTemplateList(client *Client, parentField, parentValue string) ([]*types.QueryResultVappTemplateType, error) { + + vappTemplateType := types.QtVappTemplate + if client.IsSysAdmin { + vappTemplateType = types.QtAdminVappTemplate + } + results, err := client.cumulativeQuery(vappTemplateType, nil, map[string]string{ + "type": vappTemplateType, + "filter": fmt.Sprintf("%s==%s", parentField, url.QueryEscape(parentValue)), + }) + if err != nil { + return nil, fmt.Errorf("error querying vApp templates %s", err) + } + + if client.IsSysAdmin { + return results.Results.AdminVappTemplateRecord, nil + } else { + return results.Results.VappTemplateRecord, nil + } +} + +// QueryVappTemplateList returns a list of vApp templates for the given VDC +func (vdc *Vdc) QueryVappTemplateList() ([]*types.QueryResultVappTemplateType, error) { + return queryVappTemplateList(vdc.client, "vdcName", vdc.Vdc.Name) +} + +// QueryVappTemplateList returns a list of vApp templates for the given VDC +func (vdc *AdminVdc) QueryVappTemplateList() ([]*types.QueryResultVappTemplateType, error) { + return queryVappTemplateList(vdc.client, "vdcName", vdc.AdminVdc.Name) +} + +// QueryVappTemplateList returns a list of vApp templates for the given catalog +func (catalog *Catalog) QueryVappTemplateList() 
([]*types.QueryResultVappTemplateType, error) { + return queryVappTemplateList(catalog.client, "catalogName", catalog.Catalog.Name) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/certificate_management.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/certificate_management.go new file mode 100644 index 000000000..58bbf62a2 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/certificate_management.go @@ -0,0 +1,300 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "github.com/vmware/go-vcloud-director/v2/types/v56" + "net/url" +) + +// Certificate is a structure defining a certificate in VCD +// It is called "Certificate Library" in the UI, and "Certificate Library item" in the API +type Certificate struct { + CertificateLibrary *types.CertificateLibraryItem + Href string + client *Client +} + +// GetCertificateFromLibraryById Returns certificate from library of certificates +func getCertificateFromLibraryById(client *Client, id string, additionalHeader map[string]string) (*Certificate, error) { + endpoint, err := getEndpointByVersion(client) + if err != nil { + return nil, err + } + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty certificate ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + certificate := &Certificate{ + CertificateLibrary: &types.CertificateLibraryItem{}, + client: client, + Href: urlRef.String(), + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, nil, certificate.CertificateLibrary, additionalHeader) + if err != nil { + return nil, err + } + + return certificate, nil +} + +func getEndpointByVersion(client *Client) (string, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointSSLCertificateLibrary + newerApiVersion, err := client.VersionEqualOrGreater("10.3", 3) + if err != nil { + return "", err + } + if !newerApiVersion { + // in previous version exist only API with mistype in name + endpoint = types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointSSLCertificateLibraryOld + } + return endpoint, err +} + +// GetCertificateFromLibraryById Returns certificate from library of certificates from System Context +func (client *Client) GetCertificateFromLibraryById(id string) (*Certificate, error) { + return getCertificateFromLibraryById(client, id, nil) +} + +// GetCertificateFromLibraryById Returns certificate from library of certificates from Org context +func (adminOrg *AdminOrg) GetCertificateFromLibraryById(id string) (*Certificate, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return getCertificateFromLibraryById(adminOrg.client, id, getTenantContextHeader(tenantContext)) +} + +// addCertificateToLibrary uploads certificates with configuration details +func addCertificateToLibrary(client *Client, certificateConfig *types.CertificateLibraryItem, + additionalHeader map[string]string) (*Certificate, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointSSLCertificateLibrary + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + 
return nil, err + } + + typeResponse := &Certificate{ + CertificateLibrary: &types.CertificateLibraryItem{}, + client: client, + Href: urlRef.String(), + } + + err = client.OpenApiPostItem(apiVersion, urlRef, nil, + certificateConfig, typeResponse.CertificateLibrary, additionalHeader) + if err != nil { + return nil, err + } + + return typeResponse, nil +} + +// AddCertificateToLibrary uploads certificates with configuration details +func (adminOrg *AdminOrg) AddCertificateToLibrary(certificateConfig *types.CertificateLibraryItem) (*Certificate, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return addCertificateToLibrary(adminOrg.client, certificateConfig, getTenantContextHeader(tenantContext)) +} + +// AddCertificateToLibrary uploads certificates with configuration details +func (client *Client) AddCertificateToLibrary(certificateConfig *types.CertificateLibraryItem) (*Certificate, error) { + return addCertificateToLibrary(client, certificateConfig, nil) +} + +// getAllCertificateFromLibrary retrieves all certificates. Query parameters can be supplied to perform additional +// filtering +func getAllCertificateFromLibrary(client *Client, queryParameters url.Values, additionalHeader map[string]string) ([]*Certificate, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointSSLCertificateLibrary + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + responses := []*types.CertificateLibraryItem{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &responses, additionalHeader) + if err != nil { + return nil, err + } + + var wrappedCertificates []*Certificate + for _, response := range responses { + urlRef, err := client.OpenApiBuildEndpoint(endpoint, response.Id) + if err != nil { + return nil, err + } + wrappedCertificate := &Certificate{ + CertificateLibrary: response, + client: client, + Href: urlRef.String(), + } + wrappedCertificates = append(wrappedCertificates, wrappedCertificate) + } + + return wrappedCertificates, nil +} + +// GetAllCertificatesFromLibrary retrieves all available certificates from certificate library. +// Query parameters can be supplied to perform additional filtering +func (client *Client) GetAllCertificatesFromLibrary(queryParameters url.Values) ([]*Certificate, error) { + return getAllCertificateFromLibrary(client, queryParameters, nil) +} + +// GetAllCertificatesFromLibrary r retrieves all available certificates from certificate library. +// Query parameters can be supplied to perform additional filtering +func (adminOrg *AdminOrg) GetAllCertificatesFromLibrary(queryParameters url.Values) ([]*Certificate, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return getAllCertificateFromLibrary(adminOrg.client, queryParameters, getTenantContextHeader(tenantContext)) +} + +// getCertificateFromLibraryByName retrieves certificate from certificate library by given name +// When the alias contains commas, semicolons or asterisks, the encoding is rejected by the API in VCD 10.2 version. +// For this reason, when one or more commas, semicolons or asterisks are present we run the search brute force, +// by fetching all certificates and comparing the alias. Yet, this not needed anymore in VCD 10.3 version. 
+// Also, url.QueryEscape as well as url.Values.Encode() both encode the space as a + character. So we use +// search brute force too. Reference to issue: +// https://github.com/golang/go/issues/4013 +// https://github.com/czos/goamz/pull/11/files +func getCertificateFromLibraryByName(client *Client, name string, additionalHeader map[string]string) (*Certificate, error) { + slowSearch, params, err := shouldDoSlowSearch("alias", name, client) + if err != nil { + return nil, err + } + + var foundCertificates []*Certificate + certificates, err := getAllCertificateFromLibrary(client, params, additionalHeader) + if err != nil { + return nil, err + } + if len(certificates) == 0 { + return nil, ErrorEntityNotFound + } + foundCertificates = append(foundCertificates, certificates[0]) + + if slowSearch { + foundCertificates = nil + for _, certificate := range certificates { + if certificate.CertificateLibrary.Alias == name { + foundCertificates = append(foundCertificates, certificate) + } + } + if len(foundCertificates) == 0 { + return nil, ErrorEntityNotFound + } + if len(foundCertificates) > 1 { + return nil, fmt.Errorf("more than one certificate found with name '%s'", name) + } + } + + if len(certificates) > 1 && !slowSearch { + { + return nil, fmt.Errorf("more than one certificate found with name '%s'", name) + } + } + return foundCertificates[0], nil +} + +// GetCertificateFromLibraryByName retrieves certificate from certificate library by given name +func (client *Client) GetCertificateFromLibraryByName(name string) (*Certificate, error) { + return getCertificateFromLibraryByName(client, name, nil) +} + +// GetCertificateFromLibraryByName retrieves certificate from certificate library by given name +func (adminOrg *AdminOrg) GetCertificateFromLibraryByName(name string) (*Certificate, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return getCertificateFromLibraryByName(adminOrg.client, name, getTenantContextHeader(tenantContext)) +} + +// Update updates existing Certificate. 
Allows changing only alias and description +func (certificate *Certificate) Update() (*Certificate, error) { + endpoint, err := getEndpointByVersion(certificate.client) + if err != nil { + return nil, err + } + minimumApiVersion, err := certificate.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if certificate.CertificateLibrary.Id == "" { + return nil, fmt.Errorf("cannot update certificate without id") + } + + urlRef, err := certificate.client.OpenApiBuildEndpoint(endpoint, certificate.CertificateLibrary.Id) + if err != nil { + return nil, err + } + + returnCertificate := &Certificate{ + CertificateLibrary: &types.CertificateLibraryItem{}, + client: certificate.client, + } + + err = certificate.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, certificate.CertificateLibrary, + returnCertificate.CertificateLibrary, nil) + if err != nil { + return nil, fmt.Errorf("error updating certificate: %s", err) + } + + return returnCertificate, nil +} + +// Delete deletes certificate from Certificate library +func (certificate *Certificate) Delete() error { + endpoint, err := getEndpointByVersion(certificate.client) + if err != nil { + return err + } + minimumApiVersion, err := certificate.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if certificate.CertificateLibrary.Id == "" { + return fmt.Errorf("cannot delete certificate without id") + } + + urlRef, err := certificate.client.OpenApiBuildEndpoint(endpoint, certificate.CertificateLibrary.Id) + if err != nil { + return err + } + + err = certificate.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting certificate: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/disk.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/disk.go new file mode 100644 index 000000000..24c1242f7 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/disk.go @@ -0,0 +1,431 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// Independent disk +type Disk struct { + Disk *types.Disk + client *Client +} + +// Independent disk query record +type DiskRecord struct { + Disk *types.DiskRecordType + client *Client +} + +// Init independent disk struct +func NewDisk(cli *Client) *Disk { + return &Disk{ + Disk: new(types.Disk), + client: cli, + } +} + +// Create instance with reference to types.DiskRecordType +func NewDiskRecord(cli *Client) *DiskRecord { + return &DiskRecord{ + Disk: new(types.DiskRecordType), + client: cli, + } +} + +// Create an independent disk in VDC +// Reference: vCloud API Programming Guide for Service Providers vCloud API 30.0 PDF Page 102 - 103, +// https://vdc-download.vmware.com/vmwb-repository/dcr-public/1b6cf07d-adb3-4dba-8c47-9c1c92b04857/ +// 241956dd-e128-4fcc-8131-bf66e1edd895/vcloud_sp_api_guide_30_0.pdf +func (vdc *Vdc) CreateDisk(diskCreateParams *types.DiskCreateParams) (Task, error) { + util.Logger.Printf("[TRACE] Create disk, name: %s, size: %d \n", + diskCreateParams.Disk.Name, + diskCreateParams.Disk.SizeMb, + ) + + if diskCreateParams.Disk.Name == "" { + return Task{}, fmt.Errorf("disk name is required") + } + + var err error + var createDiskLink *types.Link + + // Find the proper link for request + for _, vdcLink := range vdc.Vdc.Link { + if vdcLink.Rel == types.RelAdd && vdcLink.Type == types.MimeDiskCreateParams { + util.Logger.Printf("[TRACE] Create disk - found the proper link for request, HREF: %s, name: %s, type: %s, id: %s, rel: %s \n", + vdcLink.HREF, + vdcLink.Name, + vdcLink.Type, + vdcLink.ID, + vdcLink.Rel) + createDiskLink = vdcLink + break + } + } + + if createDiskLink == nil { + return Task{}, fmt.Errorf("could not find request URL for create disk in vdc Link") + } + + // Prepare the request payload + diskCreateParams.Xmlns = types.XMLNamespaceVCloud + + disk := NewDisk(vdc.client) + + _, err = vdc.client.ExecuteRequest(createDiskLink.HREF, http.MethodPost, + createDiskLink.Type, "error create disk: %s", diskCreateParams, disk.Disk) + if err != nil { + return Task{}, err + } + // Obtain disk task + if disk.Disk.Tasks.Task == nil || len(disk.Disk.Tasks.Task) == 0 { + return Task{}, errors.New("error cannot find disk creation task in API response") + } + task := NewTask(vdc.client) + task.Task = disk.Disk.Tasks.Task[0] + + util.Logger.Printf("[TRACE] AFTER CREATE DISK\n %s\n", prettyDisk(*disk.Disk)) + // Return the disk + return *task, nil +} + +// Update an independent disk +// 1 Verify the independent disk is not connected to any VM +// 2 Use newDiskInfo to change update the independent disk +// 3 Return task of independent disk update +// If the independent disk is connected to a VM, the task will be failed. 
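+// Usage sketch (hypothetical values; the disk must not be attached to a VM):
+//   task, err := disk.Update(&types.Disk{Name: "data-disk", SizeMb: 2048})
+//   if err == nil { err = task.WaitTaskCompletion() }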
+// Reference: vCloud API Programming Guide for Service Providers vCloud API 30.0 PDF Page 104 - 106, +// https://vdc-download.vmware.com/vmwb-repository/dcr-public/1b6cf07d-adb3-4dba-8c47-9c1c92b04857/ +// 241956dd-e128-4fcc-8131-bf66e1edd895/vcloud_sp_api_guide_30_0.pdf +func (disk *Disk) Update(newDiskInfo *types.Disk) (Task, error) { + util.Logger.Printf("[TRACE] Update disk, name: %s, size: %d, HREF: %s \n", + newDiskInfo.Name, + newDiskInfo.SizeMb, + disk.Disk.HREF, + ) + + var err error + + if newDiskInfo.Name == "" { + return Task{}, fmt.Errorf("disk name is required") + } + + // Verify the independent disk is not connected to any VM + vmRef, err := disk.AttachedVM() + if err != nil { + return Task{}, fmt.Errorf("error find attached VM: %s", err) + } + if vmRef != nil { + return Task{}, errors.New("error disk is attached") + } + + var updateDiskLink *types.Link + + // Find the proper link for request + for _, diskLink := range disk.Disk.Link { + if diskLink.Rel == types.RelEdit && diskLink.Type == types.MimeDisk { + util.Logger.Printf("[TRACE] Update disk - found the proper link for request, HREF: %s, name: %s, type: %s,id: %s, rel: %s \n", + diskLink.HREF, + diskLink.Name, + diskLink.Type, + diskLink.ID, + diskLink.Rel) + updateDiskLink = diskLink + break + } + } + + if updateDiskLink == nil { + return Task{}, fmt.Errorf("could not find request URL for update disk in disk Link") + } + + // Prepare the request payload + xmlPayload := &types.Disk{ + Xmlns: types.XMLNamespaceVCloud, + Description: newDiskInfo.Description, + SizeMb: newDiskInfo.SizeMb, + Name: newDiskInfo.Name, + StorageProfile: newDiskInfo.StorageProfile, + Owner: newDiskInfo.Owner, + } + + // Return the task + return disk.client.ExecuteTaskRequest(updateDiskLink.HREF, http.MethodPut, + updateDiskLink.Type, "error updating disk: %s", xmlPayload) +} + +// Remove an independent disk +// 1 Verify the independent disk is not connected to any VM +// 2 Delete the independent disk. Make a DELETE request to the URL in the rel="remove" link in the Disk +// 3 Return task of independent disk deletion +// If the independent disk is connected to a VM, the task will be failed. 
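+// Usage sketch (assuming an existing, detached *Disk named disk):
+//   task, err := disk.Delete()
+//   if err == nil { err = task.WaitTaskCompletion() }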
+// Reference: vCloud API Programming Guide for Service Providers vCloud API 30.0 PDF Page 106 - 107, +// https://vdc-download.vmware.com/vmwb-repository/dcr-public/1b6cf07d-adb3-4dba-8c47-9c1c92b04857/ +// 241956dd-e128-4fcc-8131-bf66e1edd895/vcloud_sp_api_guide_30_0.pdf +func (disk *Disk) Delete() (Task, error) { + util.Logger.Printf("[TRACE] Delete disk, HREF: %s \n", disk.Disk.HREF) + + var err error + + // Verify the independent disk is not connected to any VM + vmRef, err := disk.AttachedVM() + if err != nil { + return Task{}, fmt.Errorf("error find attached VM: %s", err) + } + if vmRef != nil { + return Task{}, errors.New("error disk is attached") + } + + var deleteDiskLink *types.Link + + // Find the proper link for request + for _, diskLink := range disk.Disk.Link { + if diskLink.Rel == types.RelRemove { + util.Logger.Printf("[TRACE] Delete disk - found the proper link for request, HREF: %s, name: %s, type: %s,id: %s, rel: %s \n", + diskLink.HREF, + diskLink.Name, + diskLink.Type, + diskLink.ID, + diskLink.Rel) + deleteDiskLink = diskLink + break + } + } + + if deleteDiskLink == nil { + return Task{}, fmt.Errorf("could not find request URL for delete disk in disk Link") + } + + // Return the task + return disk.client.ExecuteTaskRequest(deleteDiskLink.HREF, http.MethodDelete, + "", "error delete disk: %s", nil) +} + +// Refresh the disk information by disk href +func (disk *Disk) Refresh() error { + if disk.Disk == nil || disk.Disk.HREF == "" { + return fmt.Errorf("cannot refresh, Object is empty") + } + util.Logger.Printf("[TRACE] Disk refresh, HREF: %s\n", disk.Disk.HREF) + + unmarshalledDisk := &types.Disk{} + + _, err := disk.client.ExecuteRequest(disk.Disk.HREF, http.MethodGet, + "", "error refreshing independent disk: %s", nil, unmarshalledDisk) + if err != nil { + return err + } + disk.Disk = unmarshalledDisk + + // The request was successful + return nil +} + +// Get a VM that is attached the disk +// An independent disk can be attached to at most one virtual machine. +// If the disk isn't attached to any VM, return empty VM reference and no error. +// Otherwise return the first VM reference and no error. 
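+// Usage sketch: vmRef, err := disk.AttachedVM(); a nil vmRef with a nil err means the disk is not attached.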
+// Reference: vCloud API Programming Guide for Service Providers vCloud API 30.0 PDF Page 107, +// https://vdc-download.vmware.com/vmwb-repository/dcr-public/1b6cf07d-adb3-4dba-8c47-9c1c92b04857/ +// 241956dd-e128-4fcc-8131-bf66e1edd895/vcloud_sp_api_guide_30_0.pdf +func (disk *Disk) AttachedVM() (*types.Reference, error) { + util.Logger.Printf("[TRACE] Disk attached VM, HREF: %s\n", disk.Disk.HREF) + + var attachedVMLink *types.Link + + // Find the proper link for request + for _, diskLink := range disk.Disk.Link { + if diskLink.Type == types.MimeVMs { + util.Logger.Printf("[TRACE] Disk attached VM - found the proper link for request, HREF: %s, name: %s, type: %s,id: %s, rel: %s \n", + diskLink.HREF, + diskLink.Name, + diskLink.Type, + diskLink.ID, + diskLink.Rel) + + attachedVMLink = diskLink + break + } + } + + if attachedVMLink == nil { + return nil, fmt.Errorf("could not find request URL for attached vm in disk Link") + } + + // Decode request + var vms = new(types.Vms) + + _, err := disk.client.ExecuteRequest(attachedVMLink.HREF, http.MethodGet, + attachedVMLink.Type, "error getting attached vms: %s", nil, vms) + if err != nil { + return nil, err + } + + // If disk is not attached to any VM + if vms.VmReference == nil { + return nil, nil + } + + // An independent disk can be attached to at most one virtual machine so return the first result of VM reference + return vms.VmReference, nil +} + +// Find an independent disk by disk href in VDC +// Deprecated: Use VDC.GetDiskByHref() +func (vdc *Vdc) FindDiskByHREF(href string) (*Disk, error) { + util.Logger.Printf("[TRACE] VDC find disk By HREF: %s\n", href) + + return FindDiskByHREF(vdc.client, href) +} + +// Find an independent disk by VDC client and disk href +// Deprecated: Use VDC.GetDiskByHref() +func FindDiskByHREF(client *Client, href string) (*Disk, error) { + util.Logger.Printf("[TRACE] Find disk By HREF: %s\n", href) + + disk := NewDisk(client) + + _, err := client.ExecuteRequest(href, http.MethodGet, + "", "error finding disk: %s", nil, disk.Disk) + + // Return the disk + return disk, err + +} + +// QueryDisk find independent disk using disk name. Returns DiskRecord type +func (vdc *Vdc) QueryDisk(diskName string) (DiskRecord, error) { + + if diskName == "" { + return DiskRecord{}, fmt.Errorf("disk name can not be empty") + } + + typeMedia := "disk" + if vdc.client.IsSysAdmin { + typeMedia = "adminDisk" + } + + results, err := vdc.QueryWithNotEncodedParams(nil, map[string]string{"type": typeMedia, "filter": "name==" + url.QueryEscape(diskName), "filterEncoded": "true"}) + if err != nil { + return DiskRecord{}, fmt.Errorf("error querying disk %s", err) + } + + diskResults := results.Results.DiskRecord + if vdc.client.IsSysAdmin { + diskResults = results.Results.AdminDiskRecord + } + + newDisk := NewDiskRecord(vdc.client) + + if len(diskResults) == 1 { + newDisk.Disk = diskResults[0] + } else { + return DiskRecord{}, fmt.Errorf("found results %d", len(diskResults)) + } + + return *newDisk, nil +} + +// QueryDisks find independent disks using disk name. 
Returns list of DiskRecordType +func (vdc *Vdc) QueryDisks(diskName string) (*[]*types.DiskRecordType, error) { + + if diskName == "" { + return nil, fmt.Errorf("disk name can't be empty") + } + + typeMedia := "disk" + if vdc.client.IsSysAdmin { + typeMedia = "adminDisk" + } + + results, err := vdc.QueryWithNotEncodedParams(nil, map[string]string{"type": typeMedia, "filter": "name==" + url.QueryEscape(diskName), "filterEncoded": "true"}) + if err != nil { + return nil, fmt.Errorf("error querying disks %s", err) + } + + diskResults := results.Results.DiskRecord + if vdc.client.IsSysAdmin { + diskResults = results.Results.AdminDiskRecord + } + + return &diskResults, nil +} + +// GetDiskByHref finds a Disk by HREF +// On success, returns a pointer to the Disk structure and a nil error +// On failure, returns a nil pointer and an error +func (vdc *Vdc) GetDiskByHref(diskHref string) (*Disk, error) { + util.Logger.Printf("[TRACE] Get Disk By Href: %s\n", diskHref) + Disk := NewDisk(vdc.client) + + _, err := vdc.client.ExecuteRequest(diskHref, http.MethodGet, + "", "error retrieving Disk: %#v", nil, Disk.Disk) + if err != nil && strings.Contains(err.Error(), "MajorErrorCode:403") { + return nil, ErrorEntityNotFound + } + if err != nil { + return nil, err + } + return Disk, nil +} + +// GetDisksByName finds one or more Disks by Name +// On success, returns a pointer to the Disk list and a nil error +// On failure, returns a nil pointer and an error +func (vdc *Vdc) GetDisksByName(diskName string, refresh bool) (*[]Disk, error) { + util.Logger.Printf("[TRACE] Get Disk By Name: %s\n", diskName) + var diskList []Disk + if refresh { + err := vdc.Refresh() + if err != nil { + return nil, err + } + } + for _, resourceEntities := range vdc.Vdc.ResourceEntities { + for _, resourceEntity := range resourceEntities.ResourceEntity { + if resourceEntity.Name == diskName && resourceEntity.Type == "application/vnd.vmware.vcloud.disk+xml" { + disk, err := vdc.GetDiskByHref(resourceEntity.HREF) + if err != nil { + return nil, err + } + diskList = append(diskList, *disk) + } + } + } + if len(diskList) == 0 { + return nil, ErrorEntityNotFound + } + return &diskList, nil +} + +// GetDiskById finds a Disk by ID +// On success, returns a pointer to the Disk structure and a nil error +// On failure, returns a nil pointer and an error +func (vdc *Vdc) GetDiskById(diskId string, refresh bool) (*Disk, error) { + util.Logger.Printf("[TRACE] Get Disk By Id: %s\n", diskId) + if refresh { + err := vdc.Refresh() + if err != nil { + return nil, err + } + } + for _, resourceEntities := range vdc.Vdc.ResourceEntities { + for _, resourceEntity := range resourceEntities.ResourceEntity { + if equalIds(diskId, resourceEntity.ID, resourceEntity.HREF) && resourceEntity.Type == "application/vnd.vmware.vcloud.disk+xml" { + return vdc.GetDiskByHref(resourceEntity.HREF) + } + } + } + return nil, ErrorEntityNotFound +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/edgegateway.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/edgegateway.go new file mode 100644 index 000000000..1ce36e4ed --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/edgegateway.go @@ -0,0 +1,1433 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "bytes" + "crypto/rand" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type EdgeGateway struct { + EdgeGateway *types.EdgeGateway + client *Client +} + +// Simplified structure used to list networks connected to an edge gateway +type SimpleNetworkIdentifier struct { + Name string + InterfaceType string +} + +var reErrorBusy = regexp.MustCompile(`is busy completing an operation.$`) + +func NewEdgeGateway(cli *Client) *EdgeGateway { + return &EdgeGateway{ + EdgeGateway: new(types.EdgeGateway), + client: cli, + } +} + +// Struct which covers NAT rule fields +type NatRule struct { + NatType string + NetworkHref string + ExternalIP string + ExternalPort string + InternalIP string + InternalPort string + Protocol string + IcmpSubType string + Description string +} + +// AddDhcpPool adds (or updates) the DHCP pool connected to a specific network. +// TODO: this is legacy code from 2015, which requires a Terraform structure to work. It may need some re-thinking. +func (egw *EdgeGateway) AddDhcpPool(network *types.OrgVDCNetwork, dhcppool []interface{}) (Task, error) { + newEdgeConfig := egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration + util.Logger.Printf("[DEBUG] EDGE GATEWAY: %#v", newEdgeConfig) + util.Logger.Printf("[DEBUG] EDGE GATEWAY SERVICE: %#v", newEdgeConfig.GatewayDhcpService) + newDchpService := &types.GatewayDhcpService{} + if newEdgeConfig.GatewayDhcpService.Pool == nil { + newDchpService.IsEnabled = true + } else { + newDchpService.IsEnabled = newEdgeConfig.GatewayDhcpService.IsEnabled + + for _, dhcpPoolService := range newEdgeConfig.GatewayDhcpService.Pool { + + // Kludgy IF to avoid deleting DNAT rules not created by us. + // If matches, let's skip it and continue the loop + // Note: a simple comparison of HREF fields may fail if one of them is + // from a tenant object and the other from a provider object. They may have the + // same ID but different paths. 
Using 'equalIds' we determine equality even with + // different paths + if equalIds(network.HREF, "", dhcpPoolService.Network.HREF) { + continue + } + + newDchpService.Pool = append(newDchpService.Pool, dhcpPoolService) + } + } + + for _, item := range dhcppool { + data := item.(map[string]interface{}) + + if data["default_lease_time"] == nil { + data["default_lease_time"] = 3600 + } + + if data["max_lease_time"] == nil { + data["max_lease_time"] = 7200 + } + + dhcpRule := &types.DhcpPoolService{ + IsEnabled: true, + Network: &types.Reference{ + HREF: network.HREF, + Name: network.Name, + }, + DefaultLeaseTime: data["default_lease_time"].(int), + MaxLeaseTime: data["max_lease_time"].(int), + LowIPAddress: data["start_address"].(string), + HighIPAddress: data["end_address"].(string), + } + newDchpService.Pool = append(newDchpService.Pool, dhcpRule) + } + + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: types.XMLNamespaceVCloud, + GatewayDhcpService: newDchpService, + } + + output, err := xml.MarshalIndent(newRules, " ", " ") + if err != nil { + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + + var resp *http.Response + for { + buffer := bytes.NewBufferString(xml.Header + string(output)) + + apiEndpoint := urlParseRequestURI(egw.EdgeGateway.HREF) + apiEndpoint.Path += "/action/configureServices" + + req := egw.client.NewRequest(map[string]string{}, http.MethodPost, *apiEndpoint, buffer) + util.Logger.Printf("[DEBUG] POSTING TO URL: %s", apiEndpoint.Path) + util.Logger.Printf("[DEBUG] XML TO SEND:\n%s", buffer) + + req.Header.Add("Content-Type", "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml") + + resp, err = checkResp(egw.client.Http.Do(req)) + if err != nil { + if reErrorBusy.MatchString(err.Error()) { + time.Sleep(3 * time.Second) + continue + } + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + break + } + + task := NewTask(egw.client) + + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil + +} + +// Deprecated: use one of RemoveNATRuleAsync, RemoveNATRule +func (egw *EdgeGateway) RemoveNATMapping(natType, externalIP, internalIP, port string) (Task, error) { + return egw.RemoveNATPortMapping(natType, externalIP, port, internalIP, port) +} + +// Deprecated: use one of RemoveNATRuleAsync, RemoveNATRule +func (egw *EdgeGateway) RemoveNATPortMapping(natType, externalIP, externalPort, internalIP, internalPort string) (Task, error) { + // Find uplink interface + var uplink types.Reference + for _, gi := range egw.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface { + if gi.InterfaceType != "uplink" { + continue + } + uplink = *gi.Network + } + + newEdgeConfig := egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration + + // Take care of the NAT service + newNatService := &types.NatService{} + + newNatService.IsEnabled = newEdgeConfig.NatService.IsEnabled + newNatService.NatType = newEdgeConfig.NatService.NatType + newNatService.Policy = newEdgeConfig.NatService.Policy + newNatService.ExternalIP = newEdgeConfig.NatService.ExternalIP + + for _, natRule := range newEdgeConfig.NatService.NatRule { + + if natRule.RuleType == natType && + natRule.GatewayNatRule.OriginalIP == externalIP && + natRule.GatewayNatRule.OriginalPort == externalPort && + natRule.GatewayNatRule.Interface.HREF == uplink.HREF { + util.Logger.Printf("[DEBUG] REMOVING %s Rule: %#v", 
natRule.RuleType, natRule.GatewayNatRule) + continue + } + util.Logger.Printf("[DEBUG] KEEPING %s Rule: %#v", natRule.RuleType, natRule.GatewayNatRule) + newNatService.NatRule = append(newNatService.NatRule, natRule) + } + + newEdgeConfig.NatService = newNatService + + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: types.XMLNamespaceVCloud, + NatService: newNatService, + } + + apiEndpoint := urlParseRequestURI(egw.EdgeGateway.HREF) + apiEndpoint.Path += "/action/configureServices" + + // Return the task + return egw.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml", "error reconfiguring Edge Gateway: %s", newRules) + +} + +// RemoveNATRule removes NAT removes NAT rule identified by ID and handles task. Returns error if issues rise. +// Old functions RemoveNATPortMapping and RemoveNATMapping removed using rule details +// and expected interface to be of external network type. +func (egw *EdgeGateway) RemoveNATRule(id string) error { + task, err := egw.RemoveNATRuleAsync(id) + if err != nil { + return fmt.Errorf("error removing DNAT rule: %s", err) + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + return nil +} + +// RemoveNATRuleAsync removes NAT rule or returns an error. +// Old functions RemoveNATPortMapping and RemoveNATMapping removed using rule details +// and expected interface to be of external network type. +func (egw *EdgeGateway) RemoveNATRuleAsync(id string) (Task, error) { + if id == "" { + return Task{}, fmt.Errorf("provided id is empty") + } + + err := egw.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing edge gateway: %s", err) + } + + natServiceToUpdate := egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService + ruleIndex := -1 + if natServiceToUpdate != nil { + for n, existingNatRule := range natServiceToUpdate.NatRule { + if existingNatRule.ID == id { + ruleIndex = n + break + } + } + } else { + return Task{}, fmt.Errorf("edge gateway doesn't have NAT rules") + } + + if ruleIndex == -1 { + return Task{}, fmt.Errorf("edge gateway doesn't have rule with such ID") + } + + if len(natServiceToUpdate.NatRule) > 1 { + natServiceToUpdate.NatRule = append(natServiceToUpdate.NatRule[:ruleIndex], natServiceToUpdate.NatRule[ruleIndex+1:]...) + } else { + natServiceToUpdate.NatRule = nil + } + + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: types.XMLNamespaceVCloud, + NatService: natServiceToUpdate, + } + + egwConfigureHref := urlParseRequestURI(egw.EdgeGateway.HREF) + egwConfigureHref.Path += "/action/configureServices" + + // Return the task + return egw.client.ExecuteTaskRequest(egwConfigureHref.String(), http.MethodPost, + "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml", "error reconfiguring Edge Gateway: %s", newRules) +} + +// AddDNATRule creates DNAT rule and returns the NAT struct that was created or an error. +// Allows assigning a specific Org VDC or an external network. +// When edge gateway is advanced vCD API uses element to map with NSX edge gateway ID. A known issue is +// that updating rule using User interface resets and as result mapping is lost. +// Getting using NatRule.ID won't be valid anymore. 
+// Old functions AddNATPortMapping and AddNATMapping assigned rule only to first external network +func (egw *EdgeGateway) AddDNATRule(ruleDetails NatRule) (*types.NatRule, error) { + mappingId, err := getPseudoUuid() + if err != nil { + return nil, err + } + originalDescription := ruleDetails.Description + ruleDetails.Description = mappingId + + ruleDetails.NatType = "DNAT" + task, err := egw.AddNATRuleAsync(ruleDetails) + if err != nil { + return nil, fmt.Errorf("error creating DNAT rule: %s", err) + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + var createdNatRule *types.NatRule + + err = egw.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing edge gateway: %s", err) + } + + for _, natRule := range egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule { + if natRule.Description == mappingId { + createdNatRule = natRule + break + } + } + + if createdNatRule == nil { + return nil, fmt.Errorf("error creating DNAT rule, didn't match created rule") + } + + createdNatRule.Description = originalDescription + + return egw.UpdateNatRule(createdNatRule) +} + +// AddSNATRule creates SNAT rule and returns created NAT rule or error. +// Allows assigning a specific Org VDC or an external network. +// Old functions AddNATPortMapping and AddNATMapping aren't correct as assigned rule only to first external network +func (egw *EdgeGateway) AddSNATRule(networkHref, externalIP, internalIP, description string) (*types.NatRule, error) { + + // As vCD API doesn't return rule ID we get it manually: + // * create rule with description which value is our generated ID + // * find rule which has description with our generated ID + // * get the real (vCD's) rule ID + // * update description with real value and return nat rule + + mappingId, err := getPseudoUuid() + if err != nil { + return nil, err + } + + task, err := egw.AddNATRuleAsync(NatRule{NetworkHref: networkHref, NatType: "SNAT", ExternalIP: externalIP, + ExternalPort: "any", InternalIP: internalIP, InternalPort: "any", + IcmpSubType: "", Protocol: "any", Description: mappingId}) + if err != nil { + return nil, fmt.Errorf("error creating SNAT rule: %s", err) + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + var createdNatRule *types.NatRule + + err = egw.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing edge gateway: %s", err) + } + + for _, natRule := range egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule { + if natRule.Description == mappingId { + createdNatRule = natRule + break + } + } + + if createdNatRule == nil { + return nil, fmt.Errorf("error creating SNAT rule, didn't match created rule") + } + + createdNatRule.Description = description + + return egw.UpdateNatRule(createdNatRule) +} + +// getPseudoUuid creates unique ID/UUID +func getPseudoUuid() (string, error) { + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + return "", err + } + + uuid := fmt.Sprintf("%X-%X-%X-%X-%X", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) + + return uuid, nil +} + +// UpdateNatRule updates NAT rule and handles task. Returns updated NAT rule or error. 
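+// Example usage (illustrative sketch only; the rule ID string below is a placeholder,
+// not a value defined in this package):
+//
+// rule, err := egw.GetNatRule("12345")
+// if err != nil {
+//     return nil, err
+// }
+// rule.Description = "updated description"
+// updatedRule, err := egw.UpdateNatRule(rule)
+//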
+func (egw *EdgeGateway) UpdateNatRule(natRule *types.NatRule) (*types.NatRule, error) { + task, err := egw.UpdateNatRuleAsync(natRule) + if err != nil { + return nil, fmt.Errorf("error updating NAT rule: %s", err) + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + return egw.GetNatRule(natRule.ID) +} + +// UpdateNatRuleAsync updates NAT rule and returns task or error. +func (egw *EdgeGateway) UpdateNatRuleAsync(natRule *types.NatRule) (Task, error) { + if natRule.GatewayNatRule.Protocol != "" && !isValidProtocol(natRule.GatewayNatRule.Protocol) { + return Task{}, fmt.Errorf("provided protocol is not one of TCP, UDP, TCPUDP, ICMP, ANY") + } + + if strings.ToUpper(natRule.GatewayNatRule.Protocol) == "ICMP" && !isValidIcmpSubType(natRule.GatewayNatRule.IcmpSubType) { + return Task{}, fmt.Errorf("provided icmp sub type is not correct") + } + + err := egw.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing edge gateway: %s", err) + } + + natServiceToUpdate := egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService + + if natServiceToUpdate != nil { + for n, existingNatRule := range natServiceToUpdate.NatRule { + if existingNatRule.ID == natRule.ID { + natServiceToUpdate.NatRule[n] = natRule + } + } + } else { + return Task{}, fmt.Errorf("edge gateway doesn't have such nat rule") + } + + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: types.XMLNamespaceVCloud, + NatService: natServiceToUpdate, + } + + egwConfigureHref := urlParseRequestURI(egw.EdgeGateway.HREF) + egwConfigureHref.Path += "/action/configureServices" + + // Return the task + return egw.client.ExecuteTaskRequest(egwConfigureHref.String(), http.MethodPost, + "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml", "error reconfiguring Edge Gateway: %s", newRules) +} + +// GetNatRule returns NAT rule or error. +func (egw *EdgeGateway) GetNatRule(id string) (*types.NatRule, error) { + err := egw.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing edge gateway: %s", err) + } + + if egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService != nil { + for _, natRule := range egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule { + if natRule.ID == id { + return natRule, nil + } + } + } + + return nil, ErrorEntityNotFound +} + +// AddNATRuleAsync creates NAT rule and return task or err +// Allows assigning specific network Org VDC or external. 
Old function AddNATPortMapping and +// AddNATMapping function shouldn't be used because assigns rule to first external network +func (egw *EdgeGateway) AddNATRuleAsync(ruleDetails NatRule) (Task, error) { + if !isValidProtocol(ruleDetails.Protocol) { + return Task{}, fmt.Errorf("provided protocol is not one of TCP, UDP, TCPUDP, ICMP, ANY") + } + + if strings.ToUpper(ruleDetails.Protocol) == "ICMP" && !isValidIcmpSubType(ruleDetails.IcmpSubType) { + return Task{}, fmt.Errorf("provided icmp sub type is not correct") + } + + currentEdgeConfig := egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration + + // Take care of the NAT service + newNatService := &types.NatService{} + + if currentEdgeConfig.NatService == nil { + newNatService.IsEnabled = true + } else { + newNatService.IsEnabled = currentEdgeConfig.NatService.IsEnabled + newNatService.NatType = currentEdgeConfig.NatService.NatType + newNatService.Policy = currentEdgeConfig.NatService.Policy + newNatService.ExternalIP = currentEdgeConfig.NatService.ExternalIP + newNatService.NatRule = currentEdgeConfig.NatService.NatRule + } + + //construct new rule + natRule := &types.NatRule{ + RuleType: ruleDetails.NatType, + IsEnabled: takeBoolPointer(true), + Description: ruleDetails.Description, + GatewayNatRule: &types.GatewayNatRule{ + Interface: &types.Reference{ + HREF: ruleDetails.NetworkHref, + }, + OriginalIP: ruleDetails.ExternalIP, + OriginalPort: ruleDetails.ExternalPort, + TranslatedIP: ruleDetails.InternalIP, + TranslatedPort: ruleDetails.InternalPort, + Protocol: ruleDetails.Protocol, + IcmpSubType: ruleDetails.IcmpSubType, + }, + } + + newNatService.NatRule = append(newNatService.NatRule, natRule) + currentEdgeConfig.NatService = newNatService + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: types.XMLNamespaceVCloud, + NatService: newNatService, + } + + egwConfigureHref := urlParseRequestURI(egw.EdgeGateway.HREF) + egwConfigureHref.Path += "/action/configureServices" + + // Return the task + return egw.client.ExecuteTaskRequest(egwConfigureHref.String(), http.MethodPost, + "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml", "error reconfiguring Edge Gateway: %s", newRules) +} + +// Deprecated: Use eGW.AddSNATRule() or eGW.AddDNATRule() +func (egw *EdgeGateway) AddNATRule(network *types.OrgVDCNetwork, natType, externalIP, internalIP string) (Task, error) { + return egw.AddNATPortMappingWithUplink(network, natType, externalIP, "any", internalIP, "any", "any", "") +} + +// Deprecated: Use eGW.AddNATRule() +func (egw *EdgeGateway) AddNATMapping(natType, externalIP, internalIP string) (Task, error) { + return egw.AddNATPortMapping(natType, externalIP, "any", internalIP, "any", "any", "") +} + +// Deprecated: Use eGW.AddNATPortMappingWithUplink() +func (egw *EdgeGateway) AddNATPortMapping(natType, externalIP, externalPort, internalIP, internalPort, protocol, icmpSubType string) (Task, error) { + return egw.AddNATPortMappingWithUplink(nil, natType, externalIP, externalPort, internalIP, internalPort, protocol, icmpSubType) +} + +// Deprecated: creates not good behaviour of functionality +func (egw *EdgeGateway) getFirstUplink() types.Reference { + var uplink types.Reference + for _, gi := range egw.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface { + if gi.InterfaceType != "uplink" { + continue + } + uplink = *gi.Network + } + return uplink +} + +// Values are matched with VCD UI when creating DNAT for edge gateway. 
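+// For example (sketch):
+//
+// isValidProtocol("tcp")    // true - the check is case-insensitive
+// isValidProtocol("TCPUDP") // true
+// isValidProtocol("sctp")   // false - not among TCP, UDP, TCPUDP, ICMP, ANY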
+func isValidProtocol(protocol string) bool { + switch strings.ToUpper(protocol) { + case + "TCP", + "UDP", + "TCPUDP", + "ICMP", + "ANY": + return true + } + return false +} + +// Used values are named here https://code.vmware.com/apis/287/vcloud#/doc/doc/types/GatewayNatRuleType.html +// Also can be matched in VCD UI when creating DNAT for edge gateway. +func isValidIcmpSubType(protocol string) bool { + switch strings.ToLower(protocol) { + case + "address-mask-request", + "address-mask-reply", + "destination-unreachable", + "echo-request", + "echo-reply", + "parameter-problem", + "redirect", + "router-advertisement", + "router-solicitation", + "source-quench", + "time-exceeded", + "timestamp-request", + "timestamp-reply", + "any": + return true + } + return false +} + +// Deprecated: Use eGW.AddDNATRule() or eGW.CreateNsxvNatRule() for NSX-V +func (egw *EdgeGateway) AddNATPortMappingWithUplink(network *types.OrgVDCNetwork, natType, externalIP, externalPort, internalIP, internalPort, protocol, icmpSubType string) (Task, error) { + // if a network is provided take it, otherwise find first uplink on the edge gateway + var uplinkRef string + + if network != nil { + uplinkRef = network.HREF + } else { + // TODO: remove when method used this removed + uplinkRef = egw.getFirstUplink().HREF + } + + if !isValidProtocol(protocol) { + return Task{}, fmt.Errorf("provided protocol is not one of TCP, UDP, TCPUDP, ICMP, ANY") + } + + if strings.ToUpper(protocol) == "ICMP" && !isValidIcmpSubType(icmpSubType) { + return Task{}, fmt.Errorf("provided icmp sub type is not correct") + } + + newEdgeConfig := egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration + + // Take care of the NAT service + newNatService := &types.NatService{} + + if newEdgeConfig.NatService == nil { + newNatService.IsEnabled = true + } else { + newNatService.IsEnabled = newEdgeConfig.NatService.IsEnabled + newNatService.NatType = newEdgeConfig.NatService.NatType + newNatService.Policy = newEdgeConfig.NatService.Policy + newNatService.ExternalIP = newEdgeConfig.NatService.ExternalIP + + for _, natRule := range newEdgeConfig.NatService.NatRule { + + // Kludgy IF to avoid deleting DNAT rules not created by us. 
+ // If matches, let's skip it and continue the loop + if natRule.RuleType == natType && + natRule.GatewayNatRule.OriginalIP == externalIP && + natRule.GatewayNatRule.OriginalPort == externalPort && + natRule.GatewayNatRule.TranslatedIP == internalIP && + natRule.GatewayNatRule.TranslatedPort == internalPort && + natRule.GatewayNatRule.Interface.HREF == uplinkRef { + continue + } + + newNatService.NatRule = append(newNatService.NatRule, natRule) + } + } + + //add rule + natRule := &types.NatRule{ + RuleType: natType, + IsEnabled: takeBoolPointer(true), + GatewayNatRule: &types.GatewayNatRule{ + Interface: &types.Reference{ + HREF: uplinkRef, + }, + OriginalIP: externalIP, + OriginalPort: externalPort, + TranslatedIP: internalIP, + TranslatedPort: internalPort, + Protocol: protocol, + IcmpSubType: icmpSubType, + }, + } + newNatService.NatRule = append(newNatService.NatRule, natRule) + + newEdgeConfig.NatService = newNatService + + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: types.XMLNamespaceVCloud, + NatService: newNatService, + } + + apiEndpoint := urlParseRequestURI(egw.EdgeGateway.HREF) + apiEndpoint.Path += "/action/configureServices" + + // Return the task + return egw.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml", "error reconfiguring Edge Gateway: %s", newRules) +} + +func (egw *EdgeGateway) CreateFirewallRules(defaultAction string, rules []*types.FirewallRule) (Task, error) { + err := egw.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error: %s", err) + } + + newRules := &types.EdgeGatewayServiceConfiguration{ + Xmlns: types.XMLNamespaceVCloud, + FirewallService: &types.FirewallService{ + IsEnabled: true, + DefaultAction: defaultAction, + LogDefaultAction: true, + FirewallRule: rules, + }, + } + + output, err := xml.MarshalIndent(newRules, " ", " ") + if err != nil { + return Task{}, fmt.Errorf("error: %s", err) + } + + var resp *http.Response + for { + buffer := bytes.NewBufferString(xml.Header + string(output)) + + apiEndpoint := urlParseRequestURI(egw.EdgeGateway.HREF) + apiEndpoint.Path += "/action/configureServices" + + req := egw.client.NewRequest(map[string]string{}, http.MethodPost, *apiEndpoint, buffer) + util.Logger.Printf("[DEBUG] POSTING TO URL: %s", apiEndpoint.Path) + util.Logger.Printf("[DEBUG] XML TO SEND:\n%s", buffer) + + req.Header.Add("Content-Type", "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml") + + resp, err = checkResp(egw.client.Http.Do(req)) + if err != nil { + if reErrorBusy.MatchString(err.Error()) { + time.Sleep(3 * time.Second) + continue + } + return Task{}, fmt.Errorf("error reconfiguring Edge Gateway: %s", err) + } + break + } + + task := NewTask(egw.client) + + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil +} + +func (egw *EdgeGateway) Refresh() error { + + if egw.EdgeGateway == nil { + return fmt.Errorf("cannot refresh, Object is empty") + } + + url := egw.EdgeGateway.HREF + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. 
+ egw.EdgeGateway = &types.EdgeGateway{} + + _, err := egw.client.ExecuteRequest(url, http.MethodGet, + "", "error retrieving Edge Gateway: %s", nil, egw.EdgeGateway) + + return err +} + +func (egw *EdgeGateway) Remove1to1Mapping(internal, external string) (Task, error) { + + // Refresh EdgeGateway rules + err := egw.Refresh() + if err != nil { + fmt.Printf("error: %s\n", err) + } + + var uplinkif string + for _, gifs := range egw.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface { + if gifs.InterfaceType == "uplink" { + uplinkif = gifs.Network.HREF + } + } + + newEdgeConfig := egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration + + // Take care of the NAT service + newNatService := &types.NatService{} + + // Copy over the NAT configuration + newNatService.IsEnabled = newEdgeConfig.NatService.IsEnabled + newNatService.NatType = newEdgeConfig.NatService.NatType + newNatService.Policy = newEdgeConfig.NatService.Policy + newNatService.ExternalIP = newEdgeConfig.NatService.ExternalIP + + for i, natRule := range newEdgeConfig.NatService.NatRule { + + // Kludgy IF to avoid deleting DNAT rules not created by us. + // If matches, let's skip it and continue the loop + if natRule.RuleType == "DNAT" && + natRule.GatewayNatRule.OriginalIP == external && + natRule.GatewayNatRule.TranslatedIP == internal && + natRule.GatewayNatRule.OriginalPort == "any" && + natRule.GatewayNatRule.TranslatedPort == "any" && + natRule.GatewayNatRule.Protocol == "any" && + natRule.GatewayNatRule.Interface.HREF == uplinkif { + continue + } + + // Kludgy IF to avoid deleting SNAT rules not created by us. + // If matches, let's skip it and continue the loop + if natRule.RuleType == "SNAT" && + natRule.GatewayNatRule.OriginalIP == internal && + natRule.GatewayNatRule.TranslatedIP == external && + natRule.GatewayNatRule.Interface.HREF == uplinkif { + continue + } + + // If doesn't match the above IFs, it's something we need to preserve, + // let's add it to the new NatService struct + newNatService.NatRule = append(newNatService.NatRule, newEdgeConfig.NatService.NatRule[i]) + + } + + // Fill the new NatService Section + newEdgeConfig.NatService = newNatService + + // Take care of the Firewall service + newFwService := &types.FirewallService{} + + // Copy over the firewall configuration + newFwService.IsEnabled = newEdgeConfig.FirewallService.IsEnabled + newFwService.DefaultAction = newEdgeConfig.FirewallService.DefaultAction + newFwService.LogDefaultAction = newEdgeConfig.FirewallService.LogDefaultAction + + for i, firewallRule := range newEdgeConfig.FirewallService.FirewallRule { + + // Kludgy IF to avoid deleting inbound FW rules not created by us. + // If matches, let's skip it and continue the loop + if firewallRule.Policy == "allow" && + firewallRule.Protocols.Any && + firewallRule.DestinationPortRange == "Any" && + firewallRule.SourcePortRange == "Any" && + firewallRule.SourceIP == "Any" && + firewallRule.DestinationIP == external { + continue + } + + // Kludgy IF to avoid deleting outbound FW rules not created by us. 
+ // If matches, let's skip it and continue the loop + if firewallRule.Policy == "allow" && + firewallRule.Protocols.Any && + firewallRule.DestinationPortRange == "Any" && + firewallRule.SourcePortRange == "Any" && + firewallRule.SourceIP == internal && + firewallRule.DestinationIP == "Any" { + continue + } + + // If doesn't match the above IFs, it's something we need to preserve, + // let's add it to the new FirewallService struct + newFwService.FirewallRule = append(newFwService.FirewallRule, newEdgeConfig.FirewallService.FirewallRule[i]) + + } + + // Fill the new FirewallService Section + newEdgeConfig.FirewallService = newFwService + + // Fix + newEdgeConfig.NatService.IsEnabled = true + + apiEndpoint := urlParseRequestURI(egw.EdgeGateway.HREF) + apiEndpoint.Path += "/action/configureServices" + + // Return the task + return egw.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml", "error reconfiguring Edge Gateway: %s", newEdgeConfig) + +} + +func (egw *EdgeGateway) Create1to1Mapping(internal, external, description string) (Task, error) { + + // Refresh EdgeGateway rules + err := egw.Refresh() + if err != nil { + fmt.Printf("error: %s\n", err) + } + + var uplinkif string + for _, gifs := range egw.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface { + if gifs.InterfaceType == "uplink" { + uplinkif = gifs.Network.HREF + } + } + + newEdgeConfig := egw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration + + snat := &types.NatRule{ + Description: description, + RuleType: "SNAT", + IsEnabled: takeBoolPointer(true), + GatewayNatRule: &types.GatewayNatRule{ + Interface: &types.Reference{ + HREF: uplinkif, + }, + OriginalIP: internal, + TranslatedIP: external, + Protocol: "any", + }, + } + + if newEdgeConfig.NatService == nil { + newEdgeConfig.NatService = &types.NatService{} + } + newEdgeConfig.NatService.NatRule = append(newEdgeConfig.NatService.NatRule, snat) + + dnat := &types.NatRule{ + Description: description, + RuleType: "DNAT", + IsEnabled: takeBoolPointer(true), + GatewayNatRule: &types.GatewayNatRule{ + Interface: &types.Reference{ + HREF: uplinkif, + }, + OriginalIP: external, + OriginalPort: "any", + TranslatedIP: internal, + TranslatedPort: "any", + Protocol: "any", + }, + } + + newEdgeConfig.NatService.NatRule = append(newEdgeConfig.NatService.NatRule, dnat) + + fwin := &types.FirewallRule{ + Description: description, + IsEnabled: true, + Policy: "allow", + Protocols: &types.FirewallRuleProtocols{ + Any: true, + }, + DestinationPortRange: "Any", + DestinationIP: external, + SourcePortRange: "Any", + SourceIP: "Any", + EnableLogging: false, + } + + newEdgeConfig.FirewallService.FirewallRule = append(newEdgeConfig.FirewallService.FirewallRule, fwin) + + fwout := &types.FirewallRule{ + Description: description, + IsEnabled: true, + Policy: "allow", + Protocols: &types.FirewallRuleProtocols{ + Any: true, + }, + DestinationPortRange: "Any", + DestinationIP: "Any", + SourcePortRange: "Any", + SourceIP: internal, + EnableLogging: false, + } + + newEdgeConfig.FirewallService.FirewallRule = append(newEdgeConfig.FirewallService.FirewallRule, fwout) + + apiEndpoint := urlParseRequestURI(egw.EdgeGateway.HREF) + apiEndpoint.Path += "/action/configureServices" + + // Return the task + return egw.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml", "error reconfiguring Edge Gateway: %s", newEdgeConfig) + +} + +func 
(egw *EdgeGateway) AddIpsecVPN(ipsecVPNConfig *types.EdgeGatewayServiceConfiguration) (Task, error) { + + err := egw.Refresh() + if err != nil { + fmt.Printf("error: %s\n", err) + } + + ipsecVPNConfig.Xmlns = types.XMLNamespaceVCloud + + apiEndpoint := urlParseRequestURI(egw.EdgeGateway.HREF) + apiEndpoint.Path += "/action/configureServices" + + // Return the task + return egw.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "application/vnd.vmware.admin.edgeGatewayServiceConfiguration+xml", "error reconfiguring Edge Gateway: %s", ipsecVPNConfig) + +} + +// Removes an Edge Gateway VPN, by passing an empty configuration +func (egw *EdgeGateway) RemoveIpsecVPN() (Task, error) { + err := egw.Refresh() + if err != nil { + fmt.Printf("error: %s\n", err) + } + ipsecVPNConfig := &types.EdgeGatewayServiceConfiguration{ + Xmlns: types.XMLNamespaceVCloud, + GatewayIpsecVpnService: &types.GatewayIpsecVpnService{ + IsEnabled: false, + }, + } + return egw.AddIpsecVPN(ipsecVPNConfig) +} + +// Deletes the edge gateway, returning a task and an error with the operation result. +// https://code.vmware.com/apis/442/vcloud-director/doc/doc/operations/DELETE-EdgeGateway.html +func (egw *EdgeGateway) DeleteAsync(force bool, recursive bool) (Task, error) { + util.Logger.Printf("[TRACE] EdgeGateway.Delete - deleting edge gateway with force: %t, recursive: %t", force, recursive) + + if egw.EdgeGateway.HREF == "" { + return Task{}, fmt.Errorf("cannot delete, HREF is missing") + } + + egwUrl, err := url.ParseRequestURI(egw.EdgeGateway.HREF) + if err != nil { + return Task{}, fmt.Errorf("error parsing edge gateway url: %s", err) + } + + req := egw.client.NewRequest(map[string]string{ + "force": strconv.FormatBool(force), + "recursive": strconv.FormatBool(recursive), + }, http.MethodDelete, *egwUrl, nil) + resp, err := checkResp(egw.client.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error deleting edge gateway: %s", err) + } + task := NewTask(egw.client) + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding task response: %s", err) + } + return *task, err +} + +// Deletes the edge gateway, returning an error with the operation result. 
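+// Example (sketch): delete an edge gateway with both force and recursive set:
+//
+// if err := egw.Delete(true, true); err != nil {
+//     return err
+// }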
+// https://code.vmware.com/apis/442/vcloud-director/doc/doc/operations/DELETE-EdgeGateway.html +func (egw *EdgeGateway) Delete(force bool, recursive bool) error { + + task, err := egw.DeleteAsync(force, recursive) + if err != nil { + return err + } + if task.Task.Status == "error" { + return fmt.Errorf(combinedTaskErrorMessage(task.Task, fmt.Errorf("edge gateway not properly destroyed"))) + } + + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf(combinedTaskErrorMessage(task.Task, err)) + } + + return nil +} + +// GetNetworks returns the list of networks associated with an edge gateway +// In the return structure, an interfaceType of "uplink" indicates an external network, +// while "internal" is for Org VDC routed networks +func (egw *EdgeGateway) GetNetworks() ([]SimpleNetworkIdentifier, error) { + var networks []SimpleNetworkIdentifier + err := egw.Refresh() + if err != nil { + return networks, err + } + for _, net := range egw.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface { + netIdentifier := SimpleNetworkIdentifier{ + Name: net.Name, + InterfaceType: net.InterfaceType, + } + networks = append(networks, netIdentifier) + } + + return networks, nil +} + +// HasDefaultGateway returns true if the edge gateway uses one of the external +// networks as default gateway +func (egw *EdgeGateway) HasDefaultGateway() bool { + if egw.EdgeGateway.Configuration != nil && + egw.EdgeGateway.Configuration.GatewayInterfaces != nil { + for _, gw := range egw.EdgeGateway.Configuration.GatewayInterfaces.GatewayInterface { + // Check if the interface is used for default route + if gw.UseForDefaultRoute { + // Look for a specific subnet which is used as a default route + for _, subnetParticipation := range gw.SubnetParticipation { + if subnetParticipation.UseForDefaultRoute && + subnetParticipation.Gateway != "" && + subnetParticipation.Netmask != "" { + return true + } + } + } + } + } + return false +} + +// HasAdvancedNetworking returns true if the edge gateway has advanced network configuration enabled +func (egw *EdgeGateway) HasAdvancedNetworking() bool { + return egw.EdgeGateway.Configuration != nil && + egw.EdgeGateway.Configuration.AdvancedNetworkingEnabled != nil && + *egw.EdgeGateway.Configuration.AdvancedNetworkingEnabled +} + +// buildProxiedEdgeEndpointURL helps to get root endpoint for Edge Gateway using the +// NSX API Proxy and can append optionalSuffix which must have its own leading / +func (egw *EdgeGateway) buildProxiedEdgeEndpointURL(optionalSuffix string) (string, error) { + apiEndpoint, err := url.ParseRequestURI(egw.EdgeGateway.HREF) + if err != nil { + return "", fmt.Errorf("unable to process edge gateway URL: %s", err) + } + edgeID := strings.Split(egw.EdgeGateway.ID, ":") + if len(edgeID) != 4 { + return "", fmt.Errorf("unable to find edge gateway id: %s", egw.EdgeGateway.ID) + } + hostname := apiEndpoint.Scheme + "://" + apiEndpoint.Host + "/network/edges/" + edgeID[3] + + if optionalSuffix != "" { + return hostname + optionalSuffix, nil + } + + return hostname, nil +} + +// GetLBGeneralParams retrieves load balancer configuration of `&types.LoadBalancer` and can be used +// to access global configuration options. 
These are 4 fields only: +// LoadBalancer.Enabled, LoadBalancer.AccelerationEnabled, LoadBalancer.Logging.Enable, +// LoadBalancer.Logging.LogLevel +func (egw *EdgeGateway) GetLBGeneralParams() (*types.LbGeneralParamsWithXml, error) { + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("only advanced edge gateway supports load balancing") + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbConfigPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + loadBalancerConfig := &types.LbGeneralParamsWithXml{} + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read load balancer configuration: %s", nil, loadBalancerConfig) + + if err != nil { + return nil, err + } + + return loadBalancerConfig, nil +} + +// UpdateLBGeneralParams allows to update global load balancer configuration. +// It accepts four fields (Enabled, AccelerationEnabled, Logging.Enable, Logging.LogLevel) and uses +// them to construct types.LbGeneralParamsWithXml without altering other options to prevent config +// corruption. +// They are represented in load balancer global configuration tab in the UI. +func (egw *EdgeGateway) UpdateLBGeneralParams(enabled, accelerationEnabled, loggingEnabled bool, logLevel string) (*types.LbGeneralParamsWithXml, error) { + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("only advanced edge gateway supports load balancing") + } + + if err := validateUpdateLBGeneralParams(logLevel); err != nil { + return nil, err + } + // Retrieve load balancer to work on latest configuration + currentLb, err := egw.GetLBGeneralParams() + if err != nil { + return nil, fmt.Errorf("unable to retrieve load balancer before update: %s", err) + } + + // Check if change is needed. If not - return early. + if currentLb.Logging != nil && + currentLb.Enabled == enabled && currentLb.AccelerationEnabled == accelerationEnabled && + currentLb.Logging.Enable == loggingEnabled && currentLb.Logging.LogLevel == logLevel { + return currentLb, nil + } + + // Modify only the global configuration settings + currentLb.Enabled = enabled + currentLb.AccelerationEnabled = accelerationEnabled + currentLb.Logging = &types.LbLogging{ + Enable: loggingEnabled, + LogLevel: logLevel, + } + // Omit the version as it is updated automatically with each put + currentLb.Version = "" + + // Push updated configuration + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbConfigPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error while updating load balancer config: %s", currentLb, &types.NSXError{}) + if err != nil { + return nil, err + } + + // Retrieve configuration after update + updatedLb, err := egw.GetLBGeneralParams() + if err != nil { + return nil, fmt.Errorf("unable to retrieve load balancer config after update: %s", err) + } + + return updatedLb, nil +} + +// GetFirewallConfig retrieves firewall configuration and can be used +// to alter master configuration options. 
These are 3 fields only: +// FirewallConfigWithXml.Enabled, FirewallConfigWithXml.DefaultPolicy.LoggingEnabled and +// FirewallConfigWithXml.DefaultPolicy.Action +func (egw *EdgeGateway) GetFirewallConfig() (*types.FirewallConfigWithXml, error) { + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("only advanced edge gateway support firewall configuration") + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeFirewallPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + firewallConfig := &types.FirewallConfigWithXml{} + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read firewall configuration: %s", nil, firewallConfig) + + if err != nil { + return nil, err + } + + return firewallConfig, nil +} + +// UpdateFirewallConfig allows to update firewall configuration. +// It accepts three fields (Enabled, DefaultLoggingEnabled, DefaultAction) and uses +// them to construct types.FirewallConfigWithXml without altering other options to prevent config +// corruption. +// They are represented in firewall configuration page in the UI. +func (egw *EdgeGateway) UpdateFirewallConfig(enabled, defaultLoggingEnabled bool, defaultAction string) (*types.FirewallConfigWithXml, error) { + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("only advanced edge gateway supports load balancing") + } + + if defaultAction != "accept" && defaultAction != "deny" { + return nil, fmt.Errorf("default action must be either 'accept' or 'deny'") + } + + // Retrieve firewall latest configuration + currentFw, err := egw.GetFirewallConfig() + if err != nil { + return nil, fmt.Errorf("unable to retrieve firewall config before update: %s", err) + } + + // Check if change is needed. If not - return early. 
+ if currentFw.Enabled == enabled && currentFw.DefaultPolicy.LoggingEnabled == defaultLoggingEnabled && + currentFw.DefaultPolicy.Action == defaultAction { + return currentFw, nil + } + + // Modify only the global configuration settings + currentFw.Enabled = enabled + currentFw.DefaultPolicy.LoggingEnabled = defaultLoggingEnabled + currentFw.DefaultPolicy.Action = defaultAction + + // Omit the version as it is updated automatically with each put + currentFw.Version = "" + + // Push updated configuration + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeFirewallPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error while updating firewall configuration : %s", currentFw, &types.NSXError{}) + if err != nil { + return nil, err + } + + // Retrieve configuration after update + updatedFw, err := egw.GetFirewallConfig() + if err != nil { + return nil, fmt.Errorf("unable to retrieve firewall after update: %s", err) + } + + return updatedFw, nil +} + +// validateUpdateLoadBalancer validates mandatory fields for global load balancer configuration +// settings +func validateUpdateLBGeneralParams(logLevel string) error { + if logLevel == "" { + return fmt.Errorf("field Logging.LogLevel must be set to update load balancer") + } + + return nil +} + +// getVdcNetworks retrieves a structure of type EdgeGatewayInterfaces which contains network +// interfaces available in Edge Gateway (uses "/vdcNetworks" endpoint) +func (egw *EdgeGateway) getVdcNetworks() (*types.EdgeGatewayInterfaces, error) { + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("only advanced edge gateway supports vNics") + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL("/vdcNetworks") + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + vnicConfig := &types.EdgeGatewayInterfaces{} + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to edge gateway vnic configuration: %s", nil, vnicConfig) + + if err != nil { + return nil, err + } + + return vnicConfig, nil +} + +// GetVnicIndexByNetworkNameAndType returns *int of vNic index for specified network name and network type +// networkType one of: 'internal', 'uplink', 'trunk', 'subinterface' +// networkName cannot be empty +func (egw *EdgeGateway) GetVnicIndexByNetworkNameAndType(networkName, networkType string) (*int, error) { + vnics, err := egw.getVdcNetworks() + if err != nil { + return nil, fmt.Errorf("cannot retrieve vNic configuration: %s", err) + } + return getVnicIndexByNetworkNameAndType(networkName, networkType, vnics) +} + +// GetAnyVnicIndexByNetworkName parses XML structure of vNic mapping to networks in edge gateway XML +// and returns *int of vNic index and network type by network name +// networkName cannot be empty +// networkType will be one of: 'internal', 'uplink', 'trunk', 'subinterface' +// +// Warning: this function assumes that there are no duplicate network names attached. 
If it is so +// this function will return the first network +func (egw *EdgeGateway) GetAnyVnicIndexByNetworkName(networkName string) (*int, string, error) { + vnics, err := egw.getVdcNetworks() + if err != nil { + return nil, "", fmt.Errorf("cannot retrieve vNic configuration: %s", err) + } + + var foundVnicIndex *int + var foundVnicType string + + possibleNicTypes := []string{types.EdgeGatewayVnicTypeUplink, types.EdgeGatewayVnicTypeInternal, + types.EdgeGatewayVnicTypeTrunk, types.EdgeGatewayVnicTypeSubinterface} + + for _, nicType := range possibleNicTypes { + vNicIndex, err := getVnicIndexByNetworkNameAndType(networkName, nicType, vnics) + if err == nil { // nil error means we have found nic + foundVnicIndex = vNicIndex + foundVnicType = nicType + break + } + } + + if foundVnicIndex == nil && foundVnicType == "" { + return nil, "", ErrorEntityNotFound + } + return foundVnicIndex, foundVnicType, nil +} + +// GetNetworkNameAndTypeByVnicIndex returns network name and network type for given vNic index +// returned networkType can be one of: 'internal', 'uplink', 'trunk', 'subinterface' +func (egw *EdgeGateway) GetNetworkNameAndTypeByVnicIndex(vNicIndex int) (string, string, error) { + vnics, err := egw.getVdcNetworks() + if err != nil { + return "", "", fmt.Errorf("cannot retrieve vNic configuration: %s", err) + } + return getNetworkNameAndTypeByVnicIndex(vNicIndex, vnics) +} + +// getVnicIndexByNetworkNameAndType is wrapped and used by public function GetVnicIndexByNetworkNameAndType +func getVnicIndexByNetworkNameAndType(networkName, networkType string, vnics *types.EdgeGatewayInterfaces) (*int, error) { + if networkName == "" { + return nil, fmt.Errorf("network name cannot be empty") + } + if networkType != types.EdgeGatewayVnicTypeUplink && + networkType != types.EdgeGatewayVnicTypeInternal && + networkType != types.EdgeGatewayVnicTypeTrunk && + networkType != types.EdgeGatewayVnicTypeSubinterface { + return nil, fmt.Errorf("networkType must be one of 'uplink', 'internal', 'trunk', 'subinterface'") + } + + var foundIndex *int + foundCount := 0 + + for _, vnic := range vnics.EdgeInterface { + // Look for matching portgroup name and network type. If the PortgroupName is not empty - + // check that it contains network name as well. 
+ if vnic.Name == networkName && vnic.Type == networkType && + (vnic.PortgroupName == networkName || vnic.PortgroupName == "") { + foundIndex = vnic.Index + foundCount++ + } + } + + if foundCount > 1 { + return nil, fmt.Errorf("more than one (%d) networks of type '%s' with name '%s' found", + foundCount, networkType, networkName) + } + + if foundCount == 0 { + return nil, ErrorEntityNotFound + } + + return foundIndex, nil +} + +// getNetworkNameAndTypeByVnicIndex looks up network type and name in list of edge gateway interfaces +func getNetworkNameAndTypeByVnicIndex(vNicIndex int, vnics *types.EdgeGatewayInterfaces) (string, string, error) { + if vNicIndex < 0 { + return "", "", fmt.Errorf("vNic index cannot be negative") + } + + foundCount := 0 + var networkName, networkType string + + for _, vnic := range vnics.EdgeInterface { + if vnic.Index != nil && *vnic.Index == vNicIndex { + foundCount++ + networkName = vnic.Name + networkType = vnic.Type + } + } + + if foundCount > 1 { + return "", "", fmt.Errorf("more than one networks found for vNic %d", vNicIndex) + } + + if foundCount == 0 { + return "", "", ErrorEntityNotFound + } + + return networkName, networkType, nil +} + +// UpdateAsync updates the edge gateway in place with the information contained in the internal structure +func (egw *EdgeGateway) UpdateAsync() (Task, error) { + + egw.EdgeGateway.Xmlns = types.XMLNamespaceVCloud + egw.EdgeGateway.Configuration.Xmlns = types.XMLNamespaceVCloud + egw.EdgeGateway.Tasks = nil + + // Return the task + return egw.client.ExecuteTaskRequest(egw.EdgeGateway.HREF, http.MethodPut, + types.MimeEdgeGateway, "error updating Edge Gateway: %s", egw.EdgeGateway) +} + +// Update is a wrapper around UpdateAsync +// The pointer receiver is refreshed after update +func (egw *EdgeGateway) Update() error { + + task, err := egw.UpdateAsync() + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return err + } + return egw.Refresh() +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/ejecttask.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/ejecttask.go new file mode 100644 index 000000000..bde44e83c --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/ejecttask.go @@ -0,0 +1,92 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +type EjectTask struct { + *Task + vm *VM +} + +var timeBetweenRefresh = 3 * time.Second + +// Question Message from vCD API +const questionMessage = "Disconnect anyway and override the lock?" 
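+
+// Usage sketch (illustrative only; `task` and `vm` are assumed to come from an earlier
+// eject media request made elsewhere):
+//
+// ejectTask := NewEjectTask(task, vm)
+// // answer "yes" to the disconnect question, polling the task every 3 seconds
+// if err := ejectTask.WaitTaskCompletion(true); err != nil {
+//     return err
+// }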
+ +// Creates wrapped Task which is dedicated for eject media functionality and +// provides additional functionality to answer VM questions +func NewEjectTask(task *Task, vm *VM) *EjectTask { + return &EjectTask{ + task, + vm, + } +} + +// Checks the status of the task every 3 seconds and returns when the +// eject task is either completed or failed +func (ejectTask *EjectTask) WaitTaskCompletion(isAnswerYes bool) error { + return ejectTask.WaitInspectTaskCompletion(isAnswerYes, timeBetweenRefresh) +} + +// function which handles answers for ejecting +func (ejectTask *EjectTask) WaitInspectTaskCompletion(isAnswerYes bool, delay time.Duration) error { + + if ejectTask.Task == nil { + return fmt.Errorf("cannot refresh, Object is empty") + } + + for { + err := ejectTask.Refresh() + if err != nil { + return fmt.Errorf("error retrieving task: %s", err) + } + + // If task is not in a waiting status we're done, check if there's an error and return it. + if ejectTask.Task.Task.Status != "queued" && ejectTask.Task.Task.Status != "preRunning" && ejectTask.Task.Task.Status != "running" { + if ejectTask.Task.Task.Status == "error" { + return fmt.Errorf("task did not complete succesfully: %s", ejectTask.Task.Task.Error.Message) + } + return nil + } + + question, err := ejectTask.vm.GetQuestion() + if err != nil { + return fmt.Errorf("task did not complete succesfully: %s, quering question for VM failed: %s", ejectTask.Task.Task.Description, err.Error()) + } + + if question.QuestionId != "" && strings.Contains(question.Question, questionMessage) { + var choiceToUse *types.VmQuestionAnswerChoiceType + for _, choice := range question.Choices { + if isAnswerYes { + if strings.Contains(choice.Text, "yes") { + choiceToUse = choice + } + } else { + if strings.Contains(choice.Text, "no") { + choiceToUse = choice + } + } + } + + if choiceToUse != nil { + err = ejectTask.vm.AnswerQuestion(question.QuestionId, choiceToUse.Id) + if err != nil { + return fmt.Errorf("task did not complete succesfully: %s, answering question for eject in VM failed: %s", ejectTask.Task.Task.Description, err.Error()) + } + } + + } + + // Sleep for a given period and try again. + time.Sleep(delay) + } +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/entity.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/entity.go new file mode 100644 index 000000000..a885c4832 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/entity.go @@ -0,0 +1,71 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +type genericGetter func(string, bool) (interface{}, error) + +// getEntityByNameOrId finds a generic entity by Name Or ID +// On success, returns an empty interface representing a pointer to the structure and a nil error +// On failure, returns a nil pointer and an error +// Example usage: +// +// func (org *Org) GetCatalogByNameOrId(identifier string, refresh bool) (*Catalog, error) { +// getByName := func(name string, refresh bool) (interface{}, error) { +// return org.GetCatalogByName(name, refresh) +// } +// getById := func(id string, refresh bool) (interface{}, error) { +// return org.GetCatalogById(id, refresh) +// } +// entity, err := getEntityByNameOrId(getByName, getById, identifier, refresh) +// if entity != nil { +// return nil, err +// } +// return entity.(*Catalog), err +// } +func getEntityByNameOrId(getByName, getById genericGetter, identifier string, refresh bool) (interface{}, error) { + + var byNameErr, byIdErr error + var entity interface{} + + entity, byIdErr = getById(identifier, refresh) + if byIdErr == nil { + // Found by ID + return entity, nil + } + if IsNotFound(byIdErr) { + // Not found by ID, try by name + entity, byNameErr = getByName(identifier, false) + return entity, byNameErr + } else { + // On any other error, we return it + return nil, byIdErr + } +} + +// getEntityByNameOrIdSkipNonId is like getEntityByNameOrId, but it does not even attempt to lookup "ById" if the +// identifier does not look like URN or UUID +func getEntityByNameOrIdSkipNonId(getByName, getById genericGetter, identifier string, refresh bool) (interface{}, error) { + + var byNameErr, byIdErr error + var entity interface{} + + // Only check by Id if it is an ID or an URN + if isUrn(identifier) || IsUuid(identifier) { + entity, byIdErr = getById(identifier, refresh) + if byIdErr == nil { + // Found by ID + return entity, nil + } + } + + if IsNotFound(byIdErr) || byIdErr == nil { + // Not found by ID, try by name + entity, byNameErr = getByName(identifier, false) + return entity, byNameErr + } else { + // On any other error, we return it + return nil, byIdErr + } +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/extension.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/extension.go new file mode 100644 index 000000000..1995ebed4 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/extension.go @@ -0,0 +1,34 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "github.com/vmware/go-vcloud-director/v2/types/v56" + "net/http" +) + +// Deprecated: please use GetExternalNetwork function instead +func GetExternalNetworkByName(vcdClient *VCDClient, networkName string) (*types.ExternalNetworkReference, error) { + extNetworkRefs := &types.ExternalNetworkReferences{} + + extNetworkHREF, err := getExternalNetworkHref(&vcdClient.Client) + if err != nil { + return &types.ExternalNetworkReference{}, err + } + + _, err = vcdClient.Client.ExecuteRequest(extNetworkHREF, http.MethodGet, + "", "error retrieving external networks: %s", nil, extNetworkRefs) + if err != nil { + return &types.ExternalNetworkReference{}, err + } + + for _, netRef := range extNetworkRefs.ExternalNetworkReference { + if netRef.Name == networkName { + return netRef, nil + } + } + + return &types.ExternalNetworkReference{}, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/external_network_v2.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/external_network_v2.go new file mode 100644 index 000000000..89128eff1 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/external_network_v2.go @@ -0,0 +1,192 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// ExternalNetworkV2 is a type for version 2 of external network which uses OpenAPI endpoint to +// manage external networks of both types (NSX-V and NSX-T) +type ExternalNetworkV2 struct { + ExternalNetwork *types.ExternalNetworkV2 + client *Client +} + +// CreateExternalNetworkV2 creates a new external network using OpenAPI endpoint. It can create +// NSX-V and NSX-T backed networks based on what ExternalNetworkV2.NetworkBackings is +// provided. types.ExternalNetworkV2 has documented fields. 
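+// A minimal usage sketch (the network name is a placeholder; subnet and backing fields are
+// elided - see types.ExternalNetworkV2 for the full structure):
+//
+// newNet := &types.ExternalNetworkV2{Name: "my-external-net"} // plus subnets and NetworkBackings
+// extNet, err := CreateExternalNetworkV2(vcdClient, newNet)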
+func CreateExternalNetworkV2(vcdClient *VCDClient, newExtNet *types.ExternalNetworkV2) (*ExternalNetworkV2, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointExternalNetworks + apiVersion, err := vcdClient.Client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := vcdClient.Client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnExtNet := &ExternalNetworkV2{ + ExternalNetwork: &types.ExternalNetworkV2{}, + client: &vcdClient.Client, + } + + err = vcdClient.Client.OpenApiPostItem(apiVersion, urlRef, nil, newExtNet, returnExtNet.ExternalNetwork, nil) + if err != nil { + return nil, fmt.Errorf("error creating external network: %s", err) + } + + return returnExtNet, nil +} + +// GetExternalNetworkV2ById retrieves external network by given ID using OpenAPI endpoint +func GetExternalNetworkV2ById(vcdClient *VCDClient, id string) (*ExternalNetworkV2, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointExternalNetworks + apiVersion, err := vcdClient.Client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty external network id") + } + + urlRef, err := vcdClient.Client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + extNet := &ExternalNetworkV2{ + ExternalNetwork: &types.ExternalNetworkV2{}, + client: &vcdClient.Client, + } + + err = vcdClient.Client.OpenApiGetItem(apiVersion, urlRef, nil, extNet.ExternalNetwork, nil) + if err != nil { + return nil, err + } + + return extNet, nil +} + +// GetExternalNetworkV2ByName retrieves external network by given name using OpenAPI endpoint. +// Returns an error if not exactly one network is found. +func GetExternalNetworkV2ByName(vcdClient *VCDClient, name string) (*ExternalNetworkV2, error) { + + if name == "" { + return nil, fmt.Errorf("name cannot be empty") + } + + queryParams := url.Values{} + queryParams.Add("filter", "name=="+name) + + res, err := GetAllExternalNetworksV2(vcdClient, queryParams) + if err != nil { + return nil, fmt.Errorf("could not find external network by name: %s", err) + } + + if len(res) == 0 { + return nil, fmt.Errorf("%s: expected exactly one external network with name '%s'. Got %d", ErrorEntityNotFound, name, len(res)) + } + + if len(res) > 1 { + return nil, fmt.Errorf("expected exactly one external network with name '%s'. Got %d", name, len(res)) + } + + return res[0], nil +} + +// GetAllExternalNetworksV2 retrieves all external networks using OpenAPI endpoint. 
Query parameters can be supplied to +// perform additional filtering +func GetAllExternalNetworksV2(vcdClient *VCDClient, queryParameters url.Values) ([]*ExternalNetworkV2, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointExternalNetworks + apiVersion, err := vcdClient.Client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := vcdClient.Client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.ExternalNetworkV2{{}} + err = vcdClient.Client.OpenApiGetAllItems(apiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into external network types with client + returnExtNetworks := make([]*ExternalNetworkV2, len(typeResponses)) + for sliceIndex := range typeResponses { + returnExtNetworks[sliceIndex] = &ExternalNetworkV2{ + ExternalNetwork: typeResponses[sliceIndex], + client: &vcdClient.Client, + } + } + + return returnExtNetworks, nil +} + +// Update updates existing external network using OpenAPI endpoint +func (extNet *ExternalNetworkV2) Update() (*ExternalNetworkV2, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointExternalNetworks + apiVersion, err := extNet.client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return nil, err + } + + if extNet.ExternalNetwork.ID == "" { + return nil, fmt.Errorf("cannot update external network without id") + } + + urlRef, err := extNet.client.OpenApiBuildEndpoint(endpoint, extNet.ExternalNetwork.ID) + if err != nil { + return nil, err + } + + returnExtNet := &ExternalNetworkV2{ + ExternalNetwork: &types.ExternalNetworkV2{}, + client: extNet.client, + } + + err = extNet.client.OpenApiPutItem(apiVersion, urlRef, nil, extNet.ExternalNetwork, returnExtNet.ExternalNetwork, nil) + if err != nil { + return nil, fmt.Errorf("error updating external network: %s", err) + } + + return returnExtNet, nil +} + +// Delete deletes external network using OpenAPI endpoint +func (extNet *ExternalNetworkV2) Delete() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointExternalNetworks + apiVersion, err := extNet.client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return err + } + + if extNet.ExternalNetwork.ID == "" { + return fmt.Errorf("cannot delete external network without id") + } + + urlRef, err := extNet.client.OpenApiBuildEndpoint(endpoint, extNet.ExternalNetwork.ID) + if err != nil { + return err + } + + err = extNet.client.OpenApiDeleteItem(apiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting extNet: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/externalnetwork.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/externalnetwork.go new file mode 100644 index 000000000..9e28cacc3 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/externalnetwork.go @@ -0,0 +1,83 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "errors" + "fmt" + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" + "net/http" +) + +type ExternalNetwork struct { + ExternalNetwork *types.ExternalNetwork + client *Client +} + +func NewExternalNetwork(cli *Client) *ExternalNetwork { + return &ExternalNetwork{ + ExternalNetwork: new(types.ExternalNetwork), + client: cli, + } +} + +func getExternalNetworkHref(client *Client) (string, error) { + extensions, err := getExtension(client) + if err != nil { + return "", err + } + + for _, extensionLink := range extensions.Link { + if extensionLink.Type == "application/vnd.vmware.admin.vmwExternalNetworkReferences+xml" { + return extensionLink.HREF, nil + } + } + + return "", errors.New("external network link wasn't found") +} + +func (externalNetwork ExternalNetwork) Refresh() error { + + if !externalNetwork.client.IsSysAdmin { + return fmt.Errorf("functionality requires System Administrator privileges") + } + + _, err := externalNetwork.client.ExecuteRequest(externalNetwork.ExternalNetwork.HREF, http.MethodGet, + "", "error refreshing external network: %s", nil, externalNetwork.ExternalNetwork) + + return err +} + +func validateExternalNetwork(externalNetwork *types.ExternalNetwork) error { + if externalNetwork.Name == "" { + return errors.New("external Network missing required field: Name") + } + return nil +} + +func (externalNetwork *ExternalNetwork) Delete() (Task, error) { + util.Logger.Printf("[TRACE] ExternalNetwork.Delete") + + if !externalNetwork.client.IsSysAdmin { + return Task{}, fmt.Errorf("functionality requires System Administrator privileges") + } + + // Return the task + return externalNetwork.client.ExecuteTaskRequest(externalNetwork.ExternalNetwork.HREF, http.MethodDelete, + "", "error deleting external network: %s", nil) +} + +func (externalNetwork *ExternalNetwork) DeleteWait() error { + task, err := externalNetwork.Delete() + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("couldn't finish removing external network %#v", err) + } + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_condition.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_condition.go new file mode 100644 index 000000000..ac9e0861c --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_condition.go @@ -0,0 +1,186 @@ +package govcd + +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +import ( + "fmt" + "regexp" + + "github.com/kr/pretty" +) + +// A conditionDef is the data being carried by the filter engine when performing comparisons +type conditionDef struct { + conditionType string // it's one of SupportedFilters + stored interface{} // Any value as handled by the filter being used +} + +// A dateCondition can evaluate a date expression +type dateCondition struct { + dateExpression string +} + +// A regexpCondition is a generic filter that is the basis for other filters that require a regular expression +type regexpCondition struct { + regExpression *regexp.Regexp +} + +// an ipCondition is a condition that compares an IP using a regexp +type ipCondition regexpCondition + +// a nameCondition is a condition that compares a name using a regexp +type nameCondition regexpCondition + +// a metadataRegexpCondition compares the values corresponding to the given key using a regexp +type metadataRegexpCondition struct { + key string + regExpression *regexp.Regexp +} + +// a parentCondition compares the entity parent name with the one stored +type parentCondition struct { + parentName string +} + +// a parentIdCondition compares the entity parent ID with the one stored +type parentIdCondition struct { + parentId string +} + +// matchParent matches the wanted parent name (passed in 'stored') to the parent of the queryItem +// Input: +// * stored: the data of the condition (a parentCondition) +// * item: a QueryItem +// Returns: +// * bool: the result of the comparison +// * string: a description of the operation +// * error: an error when the input is not as expected +func matchParent(stored, item interface{}) (bool, string, error) { + condition, ok := stored.(parentCondition) + if !ok { + return false, "", fmt.Errorf("stored value is not a Parent condition (%# v)", pretty.Formatter(stored)) + } + queryItem, ok := item.(QueryItem) + if !ok { + return false, "", fmt.Errorf("item is not a queryItem searchable by parent: %# v", pretty.Formatter(item)) + } + parent := queryItem.GetParentName() + + return condition.parentName == parent, fmt.Sprintf("%s == %s", condition.parentName, queryItem.GetParentName()), nil +} + +// matchParentId matches the wanted parent ID (passed in 'stored') to the parent ID of the queryItem +// The IDs being compared are filtered through extractUuid, to make them homogeneous +// Input: +// * stored: the data of the condition (a parentCondition) +// * item: a QueryItem +// Returns: +// * bool: the result of the comparison +// * string: a description of the operation +// * error: an error when the input is not as expected +func matchParentId(stored, item interface{}) (bool, string, error) { + condition, ok := stored.(parentIdCondition) + if !ok { + return false, "", fmt.Errorf("stored value is not a parent ID condition (%# v)", pretty.Formatter(stored)) + } + queryItem, ok := item.(QueryItem) + if !ok { + return false, "", fmt.Errorf("item is not a queryItem searchable by parent ID: %# v", pretty.Formatter(item)) + } + parentId := queryItem.GetParentId() + parentId = extractUuid(parentId) + condition.parentId = extractUuid(condition.parentId) + + return condition.parentId == parentId, fmt.Sprintf("%s =~ %s", condition.parentId, parentId), nil +} + +// matchName matches a name (passed in 'stored') to the name of the queryItem +// Input: +// * stored: the data of the condition (a nameCondition) +// * item: a QueryItem +// Returns: +// * bool: the result of the comparison +// * string: a description of the operation +// * error: an error when the 
input is not as expected +func matchName(stored, item interface{}) (bool, string, error) { + re, ok := stored.(nameCondition) + if !ok { + return false, "", fmt.Errorf("stored value is not a Name Regexp (%# v)", pretty.Formatter(stored)) + } + queryItem, ok := item.(QueryItem) + if !ok { + return false, "", fmt.Errorf("item is not a queryItem searchable by regex: %# v", pretty.Formatter(item)) + } + return re.regExpression.MatchString(queryItem.GetName()), fmt.Sprintf("%s =~ %s", re.regExpression.String(), queryItem.GetName()), nil +} + +// matchIp matches an IP (passed in 'stored') to the IP of the queryItem +// Input: +// * stored: the data of the condition (an ipCondition) +// * item: a QueryItem +// Returns: +// * bool: the result of the comparison +// * string: a description of the operation +// * error: an error when the input is not as expected +func matchIp(stored, item interface{}) (bool, string, error) { + re, ok := stored.(ipCondition) + if !ok { + return false, "", fmt.Errorf("stored value is not a Condition Regexp (%# v)", pretty.Formatter(stored)) + } + queryItem, ok := item.(QueryItem) + if !ok { + return false, "", fmt.Errorf("item is not a queryItem searchable by Ip: %# v", pretty.Formatter(item)) + } + ip := queryItem.GetIp() + if ip == "" { + return false, "", fmt.Errorf("%s %s doesn't have an IP", queryItem.GetType(), queryItem.GetName()) + } + return re.regExpression.MatchString(ip), fmt.Sprintf("%s =~ %s", re.regExpression.String(), queryItem.GetIp()), nil +} + +// matchDate matches a date (passed in 'stored') to the date of the queryItem +// Input: +// * stored: the data of the condition (a dateCondition) +// * item: a QueryItem +// Returns: +// * bool: the result of the comparison +// * string: a description of the operation +// * error: an error when the input is not as expected +func matchDate(stored, item interface{}) (bool, string, error) { + expr, ok := stored.(dateCondition) + if !ok { + return false, "", fmt.Errorf("stored value is not a condition date (%# v)", pretty.Formatter(stored)) + } + queryItem, ok := item.(QueryItem) + if !ok { + return false, "", fmt.Errorf("item is not a queryItem searchable by date: %# v", pretty.Formatter(item)) + } + if queryItem.GetDate() == "" { + return false, "", nil + } + + result, err := compareDate(expr.dateExpression, queryItem.GetDate()) + return result, fmt.Sprintf("%s %s", queryItem.GetDate(), expr.dateExpression), err +} + +// matchMetadata matches a value (passed in 'stored') to the metadata value retrieved from queryItem +// Input: +// * stored: the data of the condition (a metadataRegexpCondition) +// * item: a QueryItem +// Returns: +// * bool: the result of the comparison +// * string: a description of the operation +// * error: an error when the input is not as expected +func matchMetadata(stored, item interface{}) (bool, string, error) { + re, ok := stored.(metadataRegexpCondition) + if !ok { + return false, "", fmt.Errorf("stored value is not a Metadata condition (%# v)", pretty.Formatter(stored)) + } + queryItem, ok := item.(QueryItem) + if !ok { + return false, "", fmt.Errorf("item is not a queryItem searchable by Metadata: %# v", pretty.Formatter(item)) + } + return re.regExpression.MatchString(queryItem.GetMetadataValue(re.key)), fmt.Sprintf("metadata: %s -> %s", re.key, re.regExpression.String()), nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_engine.go 
b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_engine.go new file mode 100644 index 000000000..d091acfee --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_engine.go @@ -0,0 +1,388 @@ +package govcd + +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +import ( + "fmt" + "os" + "regexp" + "strings" + "time" + + "github.com/kr/pretty" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type queryWithMetadataFunc func(queryType string, params, notEncodedParams map[string]string, + metadataFields []string, isSystem bool) (Results, error) + +type queryByMetadataFunc func(queryType string, params, notEncodedParams map[string]string, + metadataFilters map[string]MetadataFilter, isSystem bool) (Results, error) + +type resultsConverterFunc func(queryType string, results Results) ([]QueryItem, error) + +// searchByFilter is a generic filter that can operate on entities that implement the QueryItem interface +// It requires a queryType and a set of criteria. +// Returns a list of QueryItem interface elements, which can be cast back to the wanted real type +// Also returns a human readable text of the conditions being passed and how they matched the data found +func searchByFilter(queryByMetadata queryByMetadataFunc, queryWithMetadataFields queryWithMetadataFunc, + converter resultsConverterFunc, queryType string, criteria *FilterDef) ([]QueryItem, string, error) { + + // Set of conditions to be evaluated (will be filled from criteria) + var conditions []conditionDef + // List of candidate items that match all conditions + var candidatesByConditions []QueryItem + + // List of metadata fields that will be added to the query + var metadataFields []string + + // If set, metadata fields will be passed as 'metadata@SYSTEM:fieldName' + var isSystem bool + var params = make(map[string]string) + + // Will search the latest item if requested + searchLatest := false + // Will search the earliest item if requested + searchEarliest := false + + // A null filter is converted into an empty object. 
+ // Using an empty filter is equivalent to fetching all items without filtering + if criteria == nil { + criteria = &FilterDef{} + } + + // A text containing the human-readable form of the criteria being used, and the detail on how they matched the + // data being fetched + explanation := conditionText(criteria) + + // A collection of matching information for the conditions being applied + var matches []matchResult + + // Parse criteria and build the condition list + for key, value := range criteria.Filters { + // Empty values could be leftovers from the criteria build-up prior to calling this function + if value == "" { + continue + } + switch key { + case types.FilterNameRegex: + re, err := regexp.Compile(value) + if err != nil { + return nil, explanation, fmt.Errorf("error compiling regular expression '%s' : %s ", value, err) + } + conditions = append(conditions, conditionDef{key, nameCondition{re}}) + case types.FilterDate: + conditions = append(conditions, conditionDef{key, dateCondition{value}}) + case types.FilterIp: + re, err := regexp.Compile(value) + if err != nil { + return nil, explanation, fmt.Errorf("error compiling regular expression '%s' : %s ", value, err) + } + conditions = append(conditions, conditionDef{key, ipCondition{re}}) + case types.FilterParent: + conditions = append(conditions, conditionDef{key, parentCondition{value}}) + case types.FilterParentId: + conditions = append(conditions, conditionDef{key, parentIdCondition{value}}) + + case types.FilterLatest: + searchLatest = stringToBool(value) + + case types.FilterEarliest: + searchEarliest = stringToBool(value) + + default: + return nil, explanation, fmt.Errorf("[SearchByFilter] filter '%s' not supported (only allowed %v)", key, supportedFilters) + } + } + + // We can't allow the search for both the oldest and the newest item + if searchEarliest && searchLatest { + return nil, explanation, fmt.Errorf("only one of '%s' or '%s' can be used for a set of criteria", types.FilterEarliest, types.FilterLatest) + } + + var metadataFilter = make(map[string]MetadataFilter) + // Fill metadata filters + if len(criteria.Metadata) > 0 { + for _, cond := range criteria.Metadata { + k := cond.Key + v := cond.Value + isSystem = cond.IsSystem + if k == "" { + return nil, explanation, fmt.Errorf("metadata condition without key detected") + } + if v == "" { + return nil, explanation, fmt.Errorf("empty value for metadata condition with key '%s'", k) + } + + // If we use the metadata search through the API, we must make sure that the type is set + if criteria.UseMetadataApiFilter { + if cond.Type == "" || strings.EqualFold(cond.Type, "none") { + return nil, explanation, fmt.Errorf("requested search by metadata field '%s' must provide a valid type", cond.Key) + } + + // The type must be one of the expected values + err := validateMetadataType(cond.Type) + if err != nil { + return nil, explanation, fmt.Errorf("type '%s' for metadata field '%s' is invalid. 
:%s", cond.Type, cond.Key, err) + } + metadataFilter[cond.Key] = MetadataFilter{ + Type: cond.Type, + Value: fmt.Sprintf("%v", cond.Value), + } + } + + // If we don't use metadata search via the API, we add the field to the list, and + // also add a condition, using regular expressions + if !criteria.UseMetadataApiFilter { + metadataFields = append(metadataFields, k) + re, err := regexp.Compile(v.(string)) + if err != nil { + return nil, explanation, fmt.Errorf("error compiling regular expression '%s' : %s ", v, err) + } + conditions = append(conditions, conditionDef{"metadata", metadataRegexpCondition{k, re}}) + } + } + } else { + criteria.UseMetadataApiFilter = false + } + + var itemResult Results + var err error + + if criteria.UseMetadataApiFilter { + // This result will not include metadata fields. The query will use metadata parameters to restrict the search + itemResult, err = queryByMetadata(queryType, nil, params, metadataFilter, isSystem) + } else { + // This result includes metadata fields, if they exist. + itemResult, err = queryWithMetadataFields(queryType, nil, params, metadataFields, isSystem) + } + + if err != nil { + return nil, explanation, fmt.Errorf("[SearchByFilter] error retrieving query item list: %s", err) + } + if dataInspectionRequested("QE1") { + util.Logger.Printf("[INSPECT-QE1-SearchByFilter] list of retrieved items %# v\n", pretty.Formatter(itemResult.Results)) + } + var itemList []QueryItem + + // Converting the query result into a list of QueryItems + itemList, err = converter(queryType, itemResult) + if err != nil { + return nil, explanation, fmt.Errorf("[SearchByFilter] error converting QueryItem item list: %s", err) + } + if dataInspectionRequested("QE2") { + util.Logger.Printf("[INSPECT-QE2-SearchByFilter] list of converted items %# v\n", pretty.Formatter(itemList)) + } + + // Process the list using the conditions gathered above + for _, item := range itemList { + numOfMatches := 0 + + for _, condition := range conditions { + + if dataInspectionRequested("QE3") { + util.Logger.Printf("[INSPECT-QE3-SearchByFilter]\ncondition %# v\nitem %# v\n", pretty.Formatter(condition), pretty.Formatter(item)) + } + result, definition, err := conditionMatches(condition.conditionType, condition.stored, item) + if err != nil { + return nil, explanation, fmt.Errorf("[SearchByFilter] error applying condition %v: %s", condition, err) + } + + // Saves matching information, which will be consolidated in the final explanation text + matches = append(matches, matchResult{ + Name: item.GetName(), + Type: condition.conditionType, + Definition: definition, + Result: result, + }) + if !result { + continue + } + + numOfMatches++ + } + if numOfMatches == len(conditions) { + // All conditions were met + candidatesByConditions = append(candidatesByConditions, item) + } + } + + // Consolidates the explanation with information about which conditions did actually match + matchesText := matchesToText(matches) + explanation += fmt.Sprintf("\n%s", matchesText) + util.Logger.Printf("[SearchByFilter] conditions matching\n%s", explanation) + + // Once all the conditions have been evaluated, we check whether we got any items left. 
+ // + // We consider an empty result to be a valid one: it's up to the caller to evaluate the result + // and eventually use the explanation to provide an error message + if len(candidatesByConditions) == 0 { + return nil, explanation, nil + } + + // If we have only one item, there is no reason to search further for the newest or oldest item + if len(candidatesByConditions) == 1 { + return candidatesByConditions, explanation, nil + } + var emptyDatesFound []string + if searchLatest { + // By setting the latest date to the early possible date, we make sure that it will be swapped + // at the first comparison + var latestDate = "1970-01-01 00:00:00" + // item with the latest date among the candidates + var candidateByLatest QueryItem + for _, candidate := range candidatesByConditions { + itemDate := candidate.GetDate() + if itemDate == "" { + emptyDatesFound = append(emptyDatesFound, candidate.GetName()) + continue + } + util.Logger.Printf("[SearchByFilter] search latest: comparing %s to %s", latestDate, itemDate) + greater, err := compareDate(fmt.Sprintf("> %s", latestDate), itemDate) + if err != nil { + return nil, explanation, fmt.Errorf("[SearchByFilter] error comparing dates %s > %s : %s", + candidate.GetDate(), latestDate, err) + } + util.Logger.Printf("[SearchByFilter] result %v: ", greater) + if greater { + latestDate = candidate.GetDate() + candidateByLatest = candidate + } + } + if candidateByLatest != nil { + explanation += "\nlatest item found" + return []QueryItem{candidateByLatest}, explanation, nil + } else { + return nil, explanation, fmt.Errorf("search for newest item failed. Empty dates found for items %v", emptyDatesFound) + } + } + if searchEarliest { + // earliest date is set to a date in the future (10 years from now), so that any date found will be evaluated as + // earlier than this one + var earliestDate = time.Now().AddDate(10, 0, 0).String() + // item with the earliest date among the candidates + var candidateByEarliest QueryItem + for _, candidate := range candidatesByConditions { + itemDate := candidate.GetDate() + if itemDate == "" { + emptyDatesFound = append(emptyDatesFound, candidate.GetName()) + continue + } + util.Logger.Printf("[SearchByFilter] search earliest: comparing %s to %s", earliestDate, candidate.GetDate()) + greater, err := compareDate(fmt.Sprintf("< %s", earliestDate), candidate.GetDate()) + if err != nil { + return nil, explanation, fmt.Errorf("[SearchByFilter] error comparing dates %s > %s: %s", + candidate.GetDate(), earliestDate, err) + } + util.Logger.Printf("[SearchByFilter] result %v: ", greater) + if greater { + earliestDate = candidate.GetDate() + candidateByEarliest = candidate + } + } + if candidateByEarliest != nil { + explanation += "\nearliest item found" + return []QueryItem{candidateByEarliest}, explanation, nil + } else { + return nil, explanation, fmt.Errorf("search for oldest item failed. 
Empty dates found for items %v", emptyDatesFound) + } + } + return candidatesByConditions, explanation, nil +} + +// conditionMatches performs the appropriate condition evaluation, +// depending on conditionType +func conditionMatches(conditionType string, stored, item interface{}) (bool, string, error) { + switch conditionType { + case types.FilterNameRegex: + return matchName(stored, item) + case types.FilterDate: + return matchDate(stored, item) + case types.FilterIp: + return matchIp(stored, item) + case types.FilterParent: + return matchParent(stored, item) + case types.FilterParentId: + return matchParentId(stored, item) + case "metadata": + return matchMetadata(stored, item) + } + return false, "", fmt.Errorf("unsupported condition type '%s'", conditionType) +} + +// SearchByFilter is a generic filter that can operate on entities that implement the QueryItem interface +// It requires a queryType and a set of criteria. +// Returns a list of QueryItem interface elements, which can be cast back to the wanted real type +// Also returns a human readable text of the conditions being passed and how they matched the data found +// See "## Query engine" in CODING_GUIDELINES.md for more info +func (client *Client) SearchByFilter(queryType string, criteria *FilterDef) ([]QueryItem, string, error) { + return searchByFilter(client.queryByMetadataFilter, client.queryWithMetadataFields, resultToQueryItems, queryType, criteria) +} + +// SearchByFilter runs the search for a specific catalog +// The 'parentField' argument defines which filter will be added, depending on the items we search for: +// - 'catalog' contains the catalog HREF or ID +// - 'catalogName' contains the catalog name +func (catalog *Catalog) SearchByFilter(queryType, parentField string, criteria *FilterDef) ([]QueryItem, string, error) { + var err error + switch parentField { + case "catalog": + err = criteria.AddFilter(types.FilterParentId, catalog.Catalog.ID) + case "catalogName": + err = criteria.AddFilter(types.FilterParent, catalog.Catalog.Name) + default: + return nil, "", fmt.Errorf("unrecognized filter field '%s'", parentField) + } + if err != nil { + return nil, "", fmt.Errorf("error setting parent filter for catalog %s with fieldName '%s'", catalog.Catalog.Name, parentField) + } + return catalog.client.SearchByFilter(queryType, criteria) +} + +// SearchByFilter runs the search for a specific VDC +// The 'parentField' argument defines which filter will be added, depending on the items we search for: +// - 'vdc' contains the VDC HREF or ID +// - 'vdcName' contains the VDC name +func (vdc *Vdc) SearchByFilter(queryType, parentField string, criteria *FilterDef) ([]QueryItem, string, error) { + var err error + switch parentField { + case "vdc": + err = criteria.AddFilter(types.FilterParentId, vdc.Vdc.ID) + case "vdcName": + err = criteria.AddFilter(types.FilterParent, vdc.Vdc.Name) + default: + return nil, "", fmt.Errorf("unrecognized filter field '%s'", parentField) + } + if err != nil { + return nil, "", fmt.Errorf("error setting parent filter for VDC %s with fieldName '%s'", vdc.Vdc.Name, parentField) + } + return vdc.client.SearchByFilter(queryType, criteria) +} + +// SearchByFilter runs the search for a specific Org +func (org *AdminOrg) SearchByFilter(queryType string, criteria *FilterDef) ([]QueryItem, string, error) { + err := criteria.AddFilter(types.FilterParent, org.AdminOrg.Name) + if err != nil { + return nil, "", fmt.Errorf("error setting parent filter for Org %s with fieldName 'orgName'", org.AdminOrg.Name) + } 
+ return org.client.SearchByFilter(queryType, criteria) +} + +// SearchByFilter runs the search for a specific Org +func (org *Org) SearchByFilter(queryType string, criteria *FilterDef) ([]QueryItem, string, error) { + err := criteria.AddFilter(types.FilterParent, org.Org.Name) + if err != nil { + return nil, "", fmt.Errorf("error setting parent filter for Org %s with fieldName 'orgName'", org.Org.Name) + } + return org.client.SearchByFilter(queryType, criteria) +} + +// dataInspectionRequested checks if the given code was found in the inspection environment variable. +func dataInspectionRequested(code string) bool { + govcdInspect := os.Getenv("GOVCD_INSPECT") + return strings.Contains(govcdInspect, code) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_helpers.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_helpers.go new file mode 100644 index 000000000..93c764055 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_helpers.go @@ -0,0 +1,541 @@ +package govcd + +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +import ( + "fmt" + "os" + "regexp" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// This file contains functions that help create tests for filtering. +// It is not in the '*_test.go' namespace because we want to use these functions from tests in other packages. +// All exported functions from this file have the prefix "Helper" +// +// Moreover, this file is not in a separate package for the following reasons: +// * getExistingMedia is private +// * getMetadata is private +// * the 'client' component in all entity objects is private +// * the tests that are now in filter_engine_test.go would need to go in a separate package, with consequent +// need for configuration file parser duplication. + +type StringMap map[string]string + +type DateItem struct { + Name string + Date string + Entity interface{} + EntityType string +} + +// FilterMatch contains a filter, the name of the item that is expected to match, and the item itself +type FilterMatch struct { + Criteria *FilterDef + ExpectedName string + Entity interface{} + EntityType string +} + +type VappTemplateData struct { + Name string + ItemCreationDate string + VappTemplateCreationDate string + Metadata StringMap + Created bool +} + +// retrievedMetadataTypes maps the internal value of metadata type with the +// string needed when searching for a metadata field in the API +var retrievedMetadataTypes = map[string]string{ + "MetadataBooleanValue": "BOOLEAN", + "MetadataStringValue": "STRING", + "MetadataNumberValue": "NUMBER", + "MetadataDateTimeValue": "STRING", // values for DATETIME can't be passed as such in a query when the date contains colons. 
+} + +// HelperMakeFiltersFromEdgeGateways looks at the existing edge gateways and creates a set of criteria to retrieve each of them +func HelperMakeFiltersFromEdgeGateways(vdc *Vdc) ([]FilterMatch, error) { + egwList, err := vdc.QueryEdgeGatewayList() + if err != nil { + return nil, err + } + + if len(egwList) == 0 { + return []FilterMatch{}, nil + } + var filters = make([]FilterMatch, len(egwList)) + for i, egw := range egwList { + + filter := NewFilterDef() + err = filter.AddFilter(types.FilterNameRegex, strToRegex(egw.Name)) + if err != nil { + return nil, err + } + filters[i] = FilterMatch{filter, egw.Name, QueryEdgeGateway(*egw), "QueryEdgeGateway"} + } + return filters, nil +} + +// HelperMakeFiltersFromNetworks looks at the existing networks and creates a set of criteria to retrieve each of them +func HelperMakeFiltersFromNetworks(vdc *Vdc) ([]FilterMatch, error) { + netList, err := vdc.GetNetworkList() + if err != nil { + return nil, err + } + var filters = make([]FilterMatch, len(netList)) + for i, net := range netList { + + localizedItem := QueryOrgVdcNetwork(*net) + qItem := QueryItem(localizedItem) + filter, _, err := queryItemToFilter(qItem, "QueryOrgVdcNetwork") + if err != nil { + return nil, err + } + + filter, err = vdc.client.metadataToFilter(net.HREF, filter) + if err != nil { + return nil, err + } + filters[i] = FilterMatch{filter, net.Name, localizedItem, "QueryOrgVdcNetwork"} + } + return filters, nil +} + +// makeDateFilter creates date filters from a set of date records +// If there is more than one item, it creates an 'earliest' and 'latest' filter +func makeDateFilter(items []DateItem) ([]FilterMatch, error) { + var filters []FilterMatch + + if len(items) == 0 { + return filters, nil + } + entityType := items[0].EntityType + if len(items) == 1 { + filter := NewFilterDef() + err := filter.AddFilter(types.FilterDate, "=="+items[0].Date) + filters = append(filters, FilterMatch{filter, items[0].Name, items[0].Entity, entityType}) + return filters, err + } + earliestDate := time.Now().AddDate(100, 0, 0).String() + latestDate := "1970-01-01 00:00:00" + earliestName := "" + latestName := "" + var earliestEntity interface{} + var latestEntity interface{} + earliestFound := false + latestFound := false + for _, item := range items { + greater, err := compareDate(">"+latestDate, item.Date) + if err != nil { + return nil, err + } + if greater { + latestDate = item.Date + latestName = item.Name + latestEntity = item.Entity + latestFound = true + } + greater, err = compareDate("<"+earliestDate, item.Date) + if err != nil { + return nil, err + } + if greater { + earliestDate = item.Date + earliestName = item.Name + earliestEntity = item.Entity + earliestFound = true + } + exactFilter := NewFilterDef() + err = exactFilter.AddFilter(types.FilterDate, "=="+item.Date) + if err != nil { + return nil, fmt.Errorf("error adding filter '%s' '%s': %s", types.FilterDate, "=="+item.Date, err) + } + filters = append(filters, FilterMatch{exactFilter, item.Name, item.Entity, item.EntityType}) + } + + if earliestFound && latestFound && earliestDate != latestDate { + earlyFilter := NewFilterDef() + err := earlyFilter.AddFilter(types.FilterDate, "<"+latestDate) + if err != nil { + return nil, err + } + err = earlyFilter.AddFilter(types.FilterEarliest, "true") + if err != nil { + return nil, err + } + + lateFilter := NewFilterDef() + err = lateFilter.AddFilter(types.FilterDate, ">"+earliestDate) + if err != nil { + return nil, err + } + err = lateFilter.AddFilter(types.FilterLatest, "true") + 
if err != nil { + return nil, err + } + + filters = append(filters, FilterMatch{earlyFilter, earliestName, earliestEntity, entityType}) + filters = append(filters, FilterMatch{lateFilter, latestName, latestEntity, entityType}) + } + + return filters, nil +} + +func HelperMakeFiltersFromCatalogs(org *AdminOrg) ([]FilterMatch, error) { + catalogs, err := org.QueryCatalogList() + if err != nil { + return []FilterMatch{}, err + } + + var filters []FilterMatch + + var dateInfo []DateItem + for _, cat := range catalogs { + localizedItem := QueryCatalog(*cat) + qItem := QueryItem(localizedItem) + filter, dInfo, err := queryItemToFilter(qItem, "QueryCatalog") + if err != nil { + return nil, err + } + + dateInfo = append(dateInfo, dInfo...) + + filter, err = org.client.metadataToFilter(cat.HREF, filter) + if err != nil { + return nil, err + } + + filters = append(filters, FilterMatch{filter, cat.Name, localizedItem, "QueryCatalog"}) + } + dateFilter, err := makeDateFilter(dateInfo) + if err != nil { + return []FilterMatch{}, err + } + if len(dateFilter) > 0 { + filters = append(filters, dateFilter...) + } + return filters, nil +} + +func HelperMakeFiltersFromMedia(vdc *Vdc, catalogName string) ([]FilterMatch, error) { + var filters []FilterMatch + items, err := getExistingMedia(vdc) + if err != nil { + return filters, err + } + var dateInfo []DateItem + for _, item := range items { + + if item.CatalogName != catalogName { + continue + } + localizedItem := QueryMedia(*item) + qItem := QueryItem(localizedItem) + filter, dInfo, err := queryItemToFilter(qItem, "QueryMedia") + if err != nil { + return nil, err + } + + dateInfo = append(dateInfo, dInfo...) + + filter, err = vdc.client.metadataToFilter(item.HREF, filter) + if err != nil { + return nil, err + } + + filters = append(filters, FilterMatch{filter, item.Name, localizedItem, "QueryMedia"}) + } + dateFilter, err := makeDateFilter(dateInfo) + if err != nil { + return nil, err + } + if len(dateFilter) > 0 { + filters = append(filters, dateFilter...) + } + return filters, nil +} + +func queryItemToFilter(item QueryItem, entityType string) (*FilterDef, []DateItem, error) { + + var dateInfo []DateItem + filter := NewFilterDef() + err := filter.AddFilter(types.FilterNameRegex, strToRegex(item.GetName())) + if err != nil { + return nil, nil, err + } + + if item.GetIp() != "" { + err = filter.AddFilter(types.FilterIp, ipToRegex(item.GetIp())) + if err != nil { + return nil, nil, err + } + } + if item.GetDate() != "" { + dateInfo = append(dateInfo, DateItem{item.GetName(), item.GetDate(), item, entityType}) + } + return filter, dateInfo, nil +} + +func HelperMakeFiltersFromCatalogItem(catalog *Catalog) ([]FilterMatch, error) { + var filters []FilterMatch + items, err := catalog.QueryCatalogItemList() + if err != nil { + return filters, err + } + var dateInfo []DateItem + for _, item := range items { + + localItem := QueryCatalogItem(*item) + qItem := QueryItem(localItem) + + filter, dInfo, err := queryItemToFilter(qItem, "QueryCatalogItem") + if err != nil { + return nil, err + } + + dateInfo = append(dateInfo, dInfo...) + + filter, err = catalog.client.metadataToFilter(item.HREF, filter) + if err != nil { + return nil, err + } + + filters = append(filters, FilterMatch{filter, item.Name, localItem, "QueryCatalogItem"}) + } + dateFilter, err := makeDateFilter(dateInfo) + if err != nil { + return nil, err + } + if len(dateFilter) > 0 { + filters = append(filters, dateFilter...) 
+ } + return filters, nil +} + +func HelperMakeFiltersFromVappTemplate(catalog *Catalog) ([]FilterMatch, error) { + var filters []FilterMatch + items, err := catalog.QueryVappTemplateList() + if err != nil { + return filters, err + } + var dateInfo []DateItem + for _, item := range items { + + localItem := QueryVAppTemplate(*item) + qItem := QueryItem(localItem) + + filter, dInfo, err := queryItemToFilter(qItem, "QueryVAppTemplate") + if err != nil { + return nil, err + } + + dateInfo = append(dateInfo, dInfo...) + + filter, err = catalog.client.metadataToFilter(item.HREF, filter) + if err != nil { + return nil, err + } + + filters = append(filters, FilterMatch{filter, item.Name, localItem, "QueryVAppTemplate"}) + } + dateFilter, err := makeDateFilter(dateInfo) + if err != nil { + return nil, err + } + if len(dateFilter) > 0 { + filters = append(filters, dateFilter...) + } + return filters, nil +} + +// HelperCreateMultipleCatalogItems deploys several catalog items, as defined in requestData +// Returns a set of VappTemplateData with what was created. +// If the requested objects exist already, returns updated information about the existing items. +func HelperCreateMultipleCatalogItems(catalog *Catalog, requestData []VappTemplateData, verbose bool) ([]VappTemplateData, error) { + var data []VappTemplateData + ova := "../test-resources/test_vapp_template.ova" + _, err := os.Stat(ova) + if os.IsNotExist(err) { + return nil, fmt.Errorf("[HelperCreateMultipleCatalogItems] sample OVA %s not found", ova) + } + overallStart := time.Now() + for _, requested := range requestData { + name := requested.Name + + var item *CatalogItem + var vappTemplate VAppTemplate + created := false + item, err := catalog.GetCatalogItemByName(name, false) + if err == nil { + // If the item already exists, we skip the creation, and just retrieve the vapp template + vappTemplate, err = item.GetVAppTemplate() + if err != nil { + return nil, fmt.Errorf("[HelperCreateMultipleCatalogItems] error retrieving vApp template from catalog item %s : %s", item.CatalogItem.Name, err) + } + } else { + + start := time.Now() + if verbose { + fmt.Printf("%-55s %s ", start, name) + } + task, err := catalog.UploadOvf(ova, name, "test "+name, 10) + if err != nil { + return nil, fmt.Errorf("[HelperCreateMultipleCatalogItems] error uploading OVA: %s", err) + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("[HelperCreateMultipleCatalogItems] error completing task :%s", err) + } + item, err = catalog.GetCatalogItemByName(name, true) + if err != nil { + return nil, fmt.Errorf("[HelperCreateMultipleCatalogItems] error retrieving item %s: %s", name, err) + } + vappTemplate, err = item.GetVAppTemplate() + if err != nil { + return nil, fmt.Errorf("[HelperCreateMultipleCatalogItems] error retrieving vApp template: %s", err) + } + + for k, v := range requested.Metadata { + _, err := vappTemplate.AddMetadata(k, v) + if err != nil { + return nil, fmt.Errorf("[HelperCreateMultipleCatalogItems], error adding metadata: %s", err) + } + } + duration := time.Since(start) + if verbose { + fmt.Printf("- elapsed: %s\n", duration) + } + + created = true + } + data = append(data, VappTemplateData{ + Name: name, + ItemCreationDate: item.CatalogItem.DateCreated, + VappTemplateCreationDate: vappTemplate.VAppTemplate.DateCreated, + Metadata: requested.Metadata, + Created: created, + }) + } + overallDuration := time.Since(overallStart) + if verbose { + fmt.Printf("total elapsed: %s\n", overallDuration) + } + + return data, nil +} + 
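// exampleSearchLatestVappTemplate is an editor's illustrative sketch and is not part
// of the upstream vendored file. It shows how the FilterDef helpers defined in this
// package combine with Catalog.SearchByFilter to retrieve the newest vApp template
// whose name matches a regular expression. The catalog argument and the name pattern
// are assumptions.
func exampleSearchLatestVappTemplate(catalog *Catalog) (QueryItem, error) {
	criteria := NewFilterDef()
	// Keep only templates whose name starts with "photon-" (hypothetical prefix).
	if err := criteria.AddFilter(types.FilterNameRegex, "^photon-"); err != nil {
		return nil, err
	}
	// Among the matches, ask the engine for the most recently created item.
	if err := criteria.AddFilter(types.FilterLatest, "true"); err != nil {
		return nil, err
	}
	// "catalogName" makes SearchByFilter restrict results to this catalog by parent name.
	items, explanation, err := catalog.SearchByFilter(types.QtVappTemplate, "catalogName", criteria)
	if err != nil {
		return nil, fmt.Errorf("search failed: %s\n%s", err, explanation)
	}
	if len(items) == 0 {
		return nil, fmt.Errorf("no matching vApp template found\n%s", explanation)
	}
	return items[0], nil
}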
+func HelperMakeFiltersFromOrgVdc(org *Org) ([]FilterMatch, error) { + var filters []FilterMatch + items, err := org.QueryOrgVdcList() + if err != nil { + return filters, err + } + for _, item := range items { + localItem := QueryOrgVdc(*item) + qItem := QueryItem(localItem) + + filter, _, err := queryItemToFilter(qItem, "QueryOrgVdc") + if err != nil { + return nil, err + } + + filter, err = org.client.metadataToFilter(item.HREF, filter) + if err != nil { + return nil, err + } + + filters = append(filters, FilterMatch{filter, item.Name, localItem, "QueryOrgVdc"}) + } + + return filters, nil +} + +// ipToRegex creates a regular expression that matches an IP without the last element +func ipToRegex(ip string) string { + elements := strings.Split(ip, ".") + result := "^" + for i := 0; i < len(elements)-1; i++ { + result += elements[i] + `\.` + } + return result +} + +// strToRegex creates a regular expression that matches perfectly with the input query +func strToRegex(s string) string { + var result strings.Builder + var err error + _, err = result.WriteString("^") + if err != nil { + util.Logger.Printf("[DEBUG - strToRegex] error writing to string: %s", err) + } + for _, ch := range s { + if ch == '.' { + _, err = result.WriteString(fmt.Sprintf("\\%c", ch)) + } else { + _, err = result.WriteString(fmt.Sprintf("[%c]", ch)) + } + if err != nil { + util.Logger.Printf("[DEBUG - strToRegex] error writing to string: %s", err) + } + } + _, err = result.WriteString("$") + if err != nil { + util.Logger.Printf("[DEBUG - strToRegex] error writing to string: %s", err) + } + return result.String() +} + +// guessMetadataType guesses the type of a metadata value from its contents +// If the value looks like a number, or a true/false value, the corresponding type is returned +// Otherwise, we assume it's a string. +// We do this because the API doesn't return the metadata type +// (it would if the field TypedValue.XsiType were defined as `xml:"type,attr"`, but then metadata updates would fail.) 
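// Editor's note (illustrative, not in the upstream file): with the heuristics
// below, digit-only strings map to NUMBER, literal true/false to BOOLEAN, and
// everything else falls back to STRING. For example:
//
//	guessMetadataType("42")     // "NUMBER"
//	guessMetadataType("false")  // "BOOLEAN"
//	guessMetadataType("3.14")   // "STRING" (the digit regexp has no dot)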
+func guessMetadataType(value string) string { + fType := "STRING" + reNumber := regexp.MustCompile(`^[0-9]+$`) + reBool := regexp.MustCompile(`^(?:true|false)$`) + if reNumber.MatchString(value) { + fType = "NUMBER" + } + if reBool.MatchString(value) { + fType = "BOOLEAN" + } + return fType +} + +// metadataToFilter adds metadata elements to an existing filter +// href is the address of the entity for which we want to retrieve metadata +// filter is an existing filter to which we want to add metadata elements +func (client *Client) metadataToFilter(href string, filter *FilterDef) (*FilterDef, error) { + if filter == nil { + filter = &FilterDef{} + } + metadata, err := getMetadata(client, href) + if err == nil && metadata != nil && len(metadata.MetadataEntry) > 0 { + for _, md := range metadata.MetadataEntry { + isSystem := md.Domain == "SYSTEM" + var fType string + var ok bool + if md.TypedValue.XsiType == "" { + fType = guessMetadataType(md.TypedValue.Value) + } else { + fType, ok = retrievedMetadataTypes[md.TypedValue.XsiType] + if !ok { + fType = "STRING" + } + } + err = filter.AddMetadataFilter(md.Key, md.TypedValue.Value, fType, isSystem, false) + if err != nil { + return nil, err + } + } + } + return filter, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_interface.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_interface.go new file mode 100644 index 000000000..fc3d5a8a5 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_interface.go @@ -0,0 +1,297 @@ +package govcd + +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +import ( + "fmt" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// QueryItem is an entity that is used to evaluate a Condition +type QueryItem interface { + GetDate() string + GetName() string + GetType() string + GetIp() string + GetMetadataValue(key string) string + GetParentName() string + GetParentId() string + GetHref() string +} + +type ( + // All the Query* types are localizations of Query records that can be returned from a query. 
+ // Each one of these implements the QueryItem interface + QueryVAppTemplate types.QueryResultVappTemplateType + QueryCatalogItem types.QueryResultCatalogItemType + QueryEdgeGateway types.QueryResultEdgeGatewayRecordType + QueryAdminCatalog types.AdminCatalogRecord + QueryCatalog types.CatalogRecord + QueryOrgVdcNetwork types.QueryResultOrgVdcNetworkRecordType + QueryMedia types.MediaRecordType + QueryVapp types.QueryResultVAppRecordType + QueryVm types.QueryResultVMRecordType + QueryOrgVdc types.QueryResultOrgVdcRecordType +) + +// getMetadataValue is a generic metadata lookup for all query items +func getMetadataValue(metadata *types.Metadata, key string) string { + if metadata == nil || len(metadata.MetadataEntry) == 0 { + return "" + } + for _, x := range metadata.MetadataEntry { + if key == x.Key { + return x.TypedValue.Value + } + } + return "" +} + +// -------------------------------------------------------------- +// Org VDC +// -------------------------------------------------------------- +func (orgVdc QueryOrgVdc) GetHref() string { return orgVdc.HREF } +func (orgVdc QueryOrgVdc) GetName() string { return orgVdc.Name } +func (orgVdc QueryOrgVdc) GetType() string { return "org_vdc" } +func (orgVdc QueryOrgVdc) GetIp() string { return "" } // IP does not apply to VDC +func (orgVdc QueryOrgVdc) GetDate() string { return "" } // Date does not aply to VDC +func (orgVdc QueryOrgVdc) GetParentName() string { return orgVdc.OrgName } +func (orgVdc QueryOrgVdc) GetParentId() string { return orgVdc.Org } +func (orgVdc QueryOrgVdc) GetMetadataValue(key string) string { + return getMetadataValue(orgVdc.Metadata, key) +} + +// -------------------------------------------------------------- +// vApp template +// -------------------------------------------------------------- +func (vappTemplate QueryVAppTemplate) GetHref() string { return vappTemplate.HREF } +func (vappTemplate QueryVAppTemplate) GetName() string { return vappTemplate.Name } +func (vappTemplate QueryVAppTemplate) GetType() string { return "vapp_template" } +func (vappTemplate QueryVAppTemplate) GetIp() string { return "" } +func (vappTemplate QueryVAppTemplate) GetDate() string { return vappTemplate.CreationDate } +func (vappTemplate QueryVAppTemplate) GetParentName() string { return vappTemplate.CatalogName } +func (vappTemplate QueryVAppTemplate) GetParentId() string { return vappTemplate.Vdc } +func (vappTemplate QueryVAppTemplate) GetMetadataValue(key string) string { + return getMetadataValue(vappTemplate.Metadata, key) +} + +// -------------------------------------------------------------- +// media item +// -------------------------------------------------------------- +func (media QueryMedia) GetHref() string { return media.HREF } +func (media QueryMedia) GetName() string { return media.Name } +func (media QueryMedia) GetType() string { return "catalog_media" } +func (media QueryMedia) GetIp() string { return "" } +func (media QueryMedia) GetDate() string { return media.CreationDate } +func (media QueryMedia) GetParentName() string { return media.CatalogName } +func (media QueryMedia) GetParentId() string { return media.Catalog } +func (media QueryMedia) GetMetadataValue(key string) string { + return getMetadataValue(media.Metadata, key) +} + +// -------------------------------------------------------------- +// catalog item +// -------------------------------------------------------------- +func (catItem QueryCatalogItem) GetHref() string { return catItem.HREF } +func (catItem QueryCatalogItem) GetName() string { 
return catItem.Name } +func (catItem QueryCatalogItem) GetIp() string { return "" } +func (catItem QueryCatalogItem) GetType() string { return "catalog_item" } +func (catItem QueryCatalogItem) GetDate() string { return catItem.CreationDate } +func (catItem QueryCatalogItem) GetParentName() string { return catItem.CatalogName } +func (catItem QueryCatalogItem) GetParentId() string { return catItem.Catalog } +func (catItem QueryCatalogItem) GetMetadataValue(key string) string { + return getMetadataValue(catItem.Metadata, key) +} + +// -------------------------------------------------------------- +// catalog +// -------------------------------------------------------------- +func (catalog QueryCatalog) GetHref() string { return catalog.HREF } +func (catalog QueryCatalog) GetName() string { return catalog.Name } +func (catalog QueryCatalog) GetIp() string { return "" } +func (catalog QueryCatalog) GetType() string { return "catalog" } +func (catalog QueryCatalog) GetDate() string { return catalog.CreationDate } +func (catalog QueryCatalog) GetParentName() string { return catalog.OrgName } +func (catalog QueryCatalog) GetParentId() string { return "" } +func (catalog QueryCatalog) GetMetadataValue(key string) string { + return getMetadataValue(catalog.Metadata, key) +} + +func (catalog QueryAdminCatalog) GetHref() string { return catalog.HREF } +func (catalog QueryAdminCatalog) GetName() string { return catalog.Name } +func (catalog QueryAdminCatalog) GetIp() string { return "" } +func (catalog QueryAdminCatalog) GetType() string { return "catalog" } +func (catalog QueryAdminCatalog) GetDate() string { return catalog.CreationDate } +func (catalog QueryAdminCatalog) GetParentName() string { return catalog.OrgName } +func (catalog QueryAdminCatalog) GetParentId() string { return "" } +func (catalog QueryAdminCatalog) GetMetadataValue(key string) string { + return getMetadataValue(catalog.Metadata, key) +} + +// -------------------------------------------------------------- +// edge gateway +// -------------------------------------------------------------- +func (egw QueryEdgeGateway) GetHref() string { return egw.HREF } +func (egw QueryEdgeGateway) GetName() string { return egw.Name } +func (egw QueryEdgeGateway) GetIp() string { return "" } +func (egw QueryEdgeGateway) GetType() string { return "edge_gateway" } +func (egw QueryEdgeGateway) GetDate() string { return "" } +func (egw QueryEdgeGateway) GetParentName() string { return egw.OrgVdcName } +func (egw QueryEdgeGateway) GetParentId() string { return egw.Vdc } +func (egw QueryEdgeGateway) GetMetadataValue(key string) string { + // Edge Gateway doesn't support metadata + return "" +} + +// -------------------------------------------------------------- +// Org VDC network +// -------------------------------------------------------------- +func (network QueryOrgVdcNetwork) GetHref() string { return network.HREF } +func (network QueryOrgVdcNetwork) GetName() string { return network.Name } +func (network QueryOrgVdcNetwork) GetIp() string { return network.DefaultGateway } +func (network QueryOrgVdcNetwork) GetType() string { + switch network.LinkType { + case 0: + return "network_direct" + case 1: + return "network_routed" + case 2: + return "network_isolated" + default: + // There are only three types so far, but just to make it future proof + return "network" + } +} +func (network QueryOrgVdcNetwork) GetDate() string { return "" } +func (network QueryOrgVdcNetwork) GetParentName() string { return network.VdcName } +func (network 
QueryOrgVdcNetwork) GetParentId() string { return network.Vdc } +func (network QueryOrgVdcNetwork) GetMetadataValue(key string) string { + return getMetadataValue(network.Metadata, key) +} + +// -------------------------------------------------------------- +// vApp +// -------------------------------------------------------------- +func (vapp QueryVapp) GetHref() string { return vapp.HREF } +func (vapp QueryVapp) GetName() string { return vapp.Name } +func (vapp QueryVapp) GetType() string { return "vApp" } +func (vapp QueryVapp) GetIp() string { return "" } +func (vapp QueryVapp) GetDate() string { return vapp.CreationDate } +func (vapp QueryVapp) GetParentName() string { return vapp.VdcName } +func (vapp QueryVapp) GetParentId() string { return vapp.VdcHREF } +func (vapp QueryVapp) GetMetadataValue(key string) string { + return getMetadataValue(vapp.MetaData, key) +} + +// -------------------------------------------------------------- +// VM +// -------------------------------------------------------------- +func (vm QueryVm) GetHref() string { return vm.HREF } +func (vm QueryVm) GetName() string { return vm.Name } +func (vm QueryVm) GetType() string { return "Vm" } +func (vm QueryVm) GetIp() string { return vm.IpAddress } +func (vm QueryVm) GetDate() string { return vm.DateCreated } +func (vm QueryVm) GetParentName() string { return vm.ContainerName } +func (vm QueryVm) GetParentId() string { return vm.VdcHREF } +func (vm QueryVm) GetMetadataValue(key string) string { + return getMetadataValue(vm.MetaData, key) +} + +// -------------------------------------------------------------- +// result conversion +// -------------------------------------------------------------- +// resultToQueryItems converts a set of query results into a list of query items +func resultToQueryItems(queryType string, results Results) ([]QueryItem, error) { + resultSize := int64(results.Results.Total) + if resultSize < 1 { + return nil, nil + } + var items = make([]QueryItem, resultSize) + switch queryType { + case types.QtAdminCatalogItem: + for i, item := range results.Results.AdminCatalogItemRecord { + items[i] = QueryCatalogItem(*item) + } + case types.QtCatalogItem: + for i, item := range results.Results.CatalogItemRecord { + items[i] = QueryCatalogItem(*item) + } + case types.QtMedia: + for i, item := range results.Results.MediaRecord { + items[i] = QueryMedia(*item) + } + case types.QtAdminMedia: + for i, item := range results.Results.AdminMediaRecord { + items[i] = QueryMedia(*item) + } + case types.QtVappTemplate: + for i, item := range results.Results.VappTemplateRecord { + items[i] = QueryVAppTemplate(*item) + } + case types.QtAdminVappTemplate: + for i, item := range results.Results.AdminVappTemplateRecord { + items[i] = QueryVAppTemplate(*item) + } + case types.QtEdgeGateway: + for i, item := range results.Results.EdgeGatewayRecord { + items[i] = QueryEdgeGateway(*item) + } + case types.QtOrgVdcNetwork: + for i, item := range results.Results.OrgVdcNetworkRecord { + items[i] = QueryOrgVdcNetwork(*item) + } + case types.QtCatalog: + for i, item := range results.Results.CatalogRecord { + items[i] = QueryCatalog(*item) + } + case types.QtAdminCatalog: + for i, item := range results.Results.AdminCatalogRecord { + items[i] = QueryAdminCatalog(*item) + } + case types.QtVm: + for i, item := range results.Results.VMRecord { + items[i] = QueryVm(*item) + } + case types.QtAdminVm: + for i, item := range results.Results.AdminVMRecord { + items[i] = QueryVm(*item) + } + case types.QtVapp: + for i, item := range 
results.Results.VAppRecord { + items[i] = QueryVapp(*item) + } + case types.QtAdminVapp: + for i, item := range results.Results.AdminVAppRecord { + items[i] = QueryVapp(*item) + } + case types.QtOrgVdc: + for i, item := range results.Results.OrgVdcRecord { + items[i] = QueryOrgVdc(*item) + } + case types.QtAdminOrgVdc: + for i, item := range results.Results.OrgVdcAdminRecord { + items[i] = QueryOrgVdc(*item) + } + } + if len(items) > 0 { + return items, nil + } + return nil, fmt.Errorf("unsupported query type %s", queryType) +} + +// GetQueryType is an utility function to get the appropriate query type depending on +// the user's role +func (client Client) GetQueryType(queryType string) string { + if client.IsSysAdmin { + adminType, ok := types.AdminQueryTypes[queryType] + if ok { + return adminType + } else { + panic(fmt.Sprintf("no corresponding admin type found for type %s", queryType)) + } + } + return queryType +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_util.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_util.go new file mode 100644 index 000000000..fa519593e --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/filter_util.go @@ -0,0 +1,204 @@ +package govcd + +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +import ( + "fmt" + "regexp" + "strings" + + "github.com/araddon/dateparse" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +var ( + // supportedFilters lists the filters currently supported in the engine, available to users + supportedFilters = []string{ + types.FilterNameRegex, + types.FilterDate, + types.FilterIp, + types.FilterLatest, + types.FilterEarliest, + types.FilterParent, + types.FilterParentId, + } + + // SupportedMetadataTypes are the metadata types recognized so far. "NONE" is the same as "" + SupportedMetadataTypes = []string{"NONE", "STRING", "NUMBER", "BOOLEAN", "DATETIME"} +) + +// MetadataDef defines a metadata structure +type MetadataDef struct { + Key string // name of the field (addressed as metadata:key) + Type string // Type of the field (one of SupportedMetadataTypes) + Value interface{} // contents of the metadata field + IsSystem bool // if true, the metadata field will be addressed as metadata@SYSTEM:key +} + +// matchResult stores the result of a condition evaluation +// Used to build the human readable description of the engine operations +type matchResult struct { + Name string + Type string + Definition string + Result bool +} + +// FilterDef defines all the criteria used by the engine to retrieve data +type FilterDef struct { + // A collection of filters (with keys from SupportedFilters) + Filters map[string]string + + // A list of metadata filters + Metadata []MetadataDef + + // If true, the query will include metadata fields and search for exact values. 
+ // Otherwise, the engine will collect metadata fields and search by regexp + UseMetadataApiFilter bool +} + +// NewFilterDef builds a new filter definition +func NewFilterDef() *FilterDef { + return &FilterDef{ + Filters: make(map[string]string), + Metadata: nil, + } +} + +// validateMetadataType checks that a metadata type is within supported types +func validateMetadataType(valueType string) error { + typeSupported := false + for _, supported := range SupportedMetadataTypes { + if valueType == supported { + typeSupported = true + } + } + if !typeSupported { + return fmt.Errorf("metadata type '%s' not supported", valueType) + } + return nil +} + +// AddFilter adds a new filter to the criteria +func (fd *FilterDef) AddFilter(key, value string) error { + for _, allowed := range supportedFilters { + if key == allowed { + fd.Filters[key] = value + return nil + } + } + return fmt.Errorf("filter '%s' not supported", key) +} + +// AddMetadataFilter adds a new metadata filter to an existing set +func (fd *FilterDef) AddMetadataFilter(key, value, valueType string, isSystem, useMetadataApiFilter bool) error { + if valueType == "" { + valueType = "NONE" + useMetadataApiFilter = false + } + if useMetadataApiFilter { + fd.UseMetadataApiFilter = true + } + err := validateMetadataType(valueType) + if err != nil { + return err + } + fd.Metadata = append(fd.Metadata, MetadataDef{ + Key: key, + Value: value, + IsSystem: isSystem, + Type: valueType, + }) + return nil +} + +// stringToBool converts a string to a bool +// The following values are recognized as TRUE: +// t, true, y, yes, ok +func stringToBool(s string) bool { + switch strings.ToLower(s) { + case "t", "true", "y", "yes", "ok": + return true + default: + return false + } +} + +// compareDate will get a date from string `got`, and will parse `wanted` +// for an expression starting with an operator (>, <, >=, <=, ==) followed by a date +// (many formats supported, but 'YYYY-MM-DD[ hh:mm[:ss[.nnnZ]]' preferred) +// For example: +// got: "2020-03-09T09:50:51.500Z" +// wanted: ">= 2020-03-08" +// result: true +// got: "2020-03-09T09:50:51.500Z" +// wanted: "< 02-mar-2020" +// result: false +// See https://github.com/araddon/dateparse for more info +func compareDate(wanted, got string) (bool, error) { + + reExpression := regexp.MustCompile(`(>=|<=|==|<|=|>)\s*(.+)`) + + expList := reExpression.FindAllStringSubmatch(wanted, -1) + if len(expList) == 0 || len(expList[0]) == 0 { + return false, fmt.Errorf("expression not found in '%s'", wanted) + } + + operator := expList[0][1] + wantedTime, err := dateparse.ParseStrict(expList[0][2]) + if err != nil { + return false, err + } + + gotTime, err := dateparse.ParseStrict(got) + if err != nil { + return false, err + } + + wantedSeconds := wantedTime.UnixNano() + gotSeconds := gotTime.UnixNano() + + switch operator { + case "=", "==": + return gotSeconds == wantedSeconds, nil + case ">": + return gotSeconds > wantedSeconds, nil + case ">=": + return gotSeconds >= wantedSeconds, nil + case "<=": + return gotSeconds <= wantedSeconds, nil + case "<": + return gotSeconds < wantedSeconds, nil + default: + return false, fmt.Errorf("unsupported operator '%s'", operator) + } +} + +// conditionText provides a human readable string of searching criteria +func conditionText(criteria *FilterDef) string { + result := "criteria: " + + for k, v := range criteria.Filters { + result += fmt.Sprintf(`("%s" -> "%s") `, k, v) + } + for _, m := range criteria.Metadata { + marker := "metadata" + if criteria.UseMetadataApiFilter { + 
marker = "metadataApi" + } + result += fmt.Sprintf(`%s("%s" -> "%s") `, marker, m.Key, m.Value) + } + return result +} + +// matchesToText provides a human readable string of search operations results +func matchesToText(matches []matchResult) string { + result := "" + for _, item := range matches { + result += fmt.Sprintf("name: %s; type: %s definition: %s; result: %v\n", item.Name, item.Type, item.Definition, item.Result) + } + return result +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/global_role.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/global_role.go new file mode 100644 index 000000000..9ce8fa2c5 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/global_role.go @@ -0,0 +1,383 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +type GlobalRole struct { + GlobalRole *types.GlobalRole + client *Client +} + +// GetAllGlobalRoles retrieves all global roles. Query parameters can be supplied to perform additional filtering +// Only System administrator can handle global roles +func (client *Client) GetAllGlobalRoles(queryParameters url.Values) ([]*GlobalRole, error) { + if !client.IsSysAdmin { + return nil, fmt.Errorf("only system administrator can handle global roles") + } + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.GlobalRole{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into GlobalRole types with client + returnGlobalRoles := make([]*GlobalRole, len(typeResponses)) + for sliceIndex := range typeResponses { + returnGlobalRoles[sliceIndex] = &GlobalRole{ + GlobalRole: typeResponses[sliceIndex], + client: client, + } + } + + return returnGlobalRoles, nil +} + +// GetGlobalRoleByName retrieves a global role by given name +func (client *Client) GetGlobalRoleByName(name string) (*GlobalRole, error) { + queryParams := url.Values{} + queryParams.Add("filter", "name=="+name) + globalRoles, err := client.GetAllGlobalRoles(queryParams) + if err != nil { + return nil, err + } + if len(globalRoles) == 0 { + return nil, ErrorEntityNotFound + } + if len(globalRoles) > 1 { + return nil, fmt.Errorf("more than one global role found with name '%s'", name) + } + return globalRoles[0], nil +} + +// GetGlobalRoleById retrieves global role by given ID +func (client *Client) GetGlobalRoleById(id string) (*GlobalRole, error) { + if !client.IsSysAdmin { + return nil, fmt.Errorf("only system administrator can handle global roles") + } + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty GlobalRole id") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + globalRole := &GlobalRole{ + GlobalRole: &types.GlobalRole{}, + client: client, + } + + err = 
client.OpenApiGetItem(minimumApiVersion, urlRef, nil, globalRole.GlobalRole, nil) + if err != nil { + return nil, err + } + + return globalRole, nil +} + +// CreateGlobalRole creates a new global role as a system administrator +func (client *Client) CreateGlobalRole(newGlobalRole *types.GlobalRole) (*GlobalRole, error) { + if !client.IsSysAdmin { + return nil, fmt.Errorf("only system administrator can handle global roles") + } + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + if newGlobalRole.BundleKey == "" { + newGlobalRole.BundleKey = types.VcloudUndefinedKey + } + if newGlobalRole.PublishAll == nil { + newGlobalRole.PublishAll = takeBoolPointer(false) + } + returnGlobalRole := &GlobalRole{ + GlobalRole: &types.GlobalRole{}, + client: client, + } + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, newGlobalRole, returnGlobalRole.GlobalRole, nil) + if err != nil { + return nil, fmt.Errorf("error creating global role: %s", err) + } + + return returnGlobalRole, nil +} + +// Update updates existing global role +func (globalRole *GlobalRole) Update() (*GlobalRole, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + minimumApiVersion, err := globalRole.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if globalRole.GlobalRole.Id == "" { + return nil, fmt.Errorf("cannot update role without id") + } + + urlRef, err := globalRole.client.OpenApiBuildEndpoint(endpoint, globalRole.GlobalRole.Id) + if err != nil { + return nil, err + } + + returnGlobalRole := &GlobalRole{ + GlobalRole: &types.GlobalRole{}, + client: globalRole.client, + } + + err = globalRole.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, globalRole.GlobalRole, returnGlobalRole.GlobalRole, nil) + if err != nil { + return nil, fmt.Errorf("error updating global role: %s", err) + } + + return returnGlobalRole, nil +} + +// Delete deletes global role +func (globalRole *GlobalRole) Delete() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + minimumApiVersion, err := globalRole.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if globalRole.GlobalRole.Id == "" { + return fmt.Errorf("cannot delete global role without id") + } + + urlRef, err := globalRole.client.OpenApiBuildEndpoint(endpoint, globalRole.GlobalRole.Id) + if err != nil { + return err + } + + err = globalRole.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting global role: %s", err) + } + + return nil +} + +// getContainerTenants retrieves all tenants associated with a given rights container (Global Role, Rights Bundle). 
+// Query parameters can be supplied to perform additional filtering +func getContainerTenants(client *Client, rightsContainerId, endpoint string, queryParameters url.Values) ([]types.OpenApiReference, error) { + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint + rightsContainerId + "/tenants") + if err != nil { + return nil, err + } + + typeResponses := types.OpenApiItems{ + Values: []types.OpenApiReference{}, + } + + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses.Values, nil) + if err != nil { + return nil, err + } + + return typeResponses.Values, nil +} + +// publishContainerToTenants is a generic function that publishes or unpublishes a rights collection (Global Role, or Rights bundle) to tenants +// containerType is an informative string (one of "GlobalRole", "RightsBundle") +// name and id are the name and ID of the collection +// endpoint is the API endpoint used as a basis for the POST operation +// tenants is a collection of tenants (ID+name) to be added +// publishType can be one of "add", "remove", "replace" +func publishContainerToTenants(client *Client, containerType, name, id, endpoint string, tenants []types.OpenApiReference, publishType string) error { + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if id == "" { + return fmt.Errorf("cannot update %s without id", containerType) + } + if name == "" { + return fmt.Errorf("empty name given for %s %s", containerType, id) + } + + var operation string + + var action func(apiVersion string, urlRef *url.URL, params url.Values, payload, outType interface{}, additionalHeader map[string]string) error + + switch publishType { + case "add": + operation = "/tenants/publish" + action = client.OpenApiPostItem + case "replace": + operation = "/tenants" + action = client.OpenApiPutItem + case "remove": + operation = "/tenants/unpublish" + action = client.OpenApiPostItem + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id, operation) + if err != nil { + return err + } + + var input types.OpenApiItems + + for _, tenant := range tenants { + input.Values = append(input.Values, types.OpenApiReference{ + Name: tenant.Name, + ID: tenant.ID, + }) + } + var pages types.OpenApiPages + + err = action(minimumApiVersion, urlRef, nil, &input, &pages, nil) + + if err != nil { + return fmt.Errorf("error publishing %s %s to tenants: %s", containerType, name, err) + } + + return nil +} + +// publishContainerToAllTenants is a generic function that publishes or unpublishes a rights collection ( Global Role, or Rights bundle) to all tenants +// containerType is an informative string (one of "GlobalRole", "RightsBundle") +// name and id are the name and ID of the collection +// endpoint is the API endpoint used as a basis for the POST operation +// If "publish" is false, it will revert the operation +func publishContainerToAllTenants(client *Client, containerType, name, id, endpoint string, publish bool) error { + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if id == "" { + return fmt.Errorf("cannot update %s without id", containerType) + } + if name == "" { + return fmt.Errorf("empty name given for %s %s", containerType, id) + } + + operation := "/tenants/publishAll" + if !publish { + operation = "/tenants/unpublishAll" + } + urlRef, err := 
client.OpenApiBuildEndpoint(endpoint, id, operation) + if err != nil { + return err + } + + var pages types.OpenApiPages + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, &pages, &pages, nil) + + if err != nil { + return fmt.Errorf("error publishing %s %s to tenants: %s", containerType, name, err) + } + + return nil +} + +// AddRights adds a collection of rights to a global role +func (globalRole *GlobalRole) AddRights(newRights []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return addRightsToRole(globalRole.client, "GlobalRole", globalRole.GlobalRole.Name, globalRole.GlobalRole.Id, endpoint, newRights, nil) +} + +// UpdateRights replaces existing rights with the given collection of rights +func (globalRole *GlobalRole) UpdateRights(newRights []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return updateRightsInRole(globalRole.client, "GlobalRole", globalRole.GlobalRole.Name, globalRole.GlobalRole.Id, endpoint, newRights, nil) +} + +// RemoveRights removes specific rights from a global role +func (globalRole *GlobalRole) RemoveRights(removeRights []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return removeRightsFromRole(globalRole.client, "GlobalRole", globalRole.GlobalRole.Name, globalRole.GlobalRole.Id, endpoint, removeRights, nil) +} + +// RemoveAllRights removes all rights from a global role +func (globalRole *GlobalRole) RemoveAllRights() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return removeAllRightsFromRole(globalRole.client, "GlobalRole", globalRole.GlobalRole.Name, globalRole.GlobalRole.Id, endpoint, nil) +} + +// GetRights retrieves all rights belonging to a given Global Role. Query parameters can be supplied to perform additional +// filtering +func (globalRole *GlobalRole) GetRights(queryParameters url.Values) ([]*types.Right, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return getRights(globalRole.client, globalRole.GlobalRole.Id, endpoint, queryParameters, nil) +} + +// GetTenants retrieves all tenants associated to a given Global Role. 
Query parameters can be supplied to perform additional +// filtering +func (globalRole *GlobalRole) GetTenants(queryParameters url.Values) ([]types.OpenApiReference, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return getContainerTenants(globalRole.client, globalRole.GlobalRole.Id, endpoint, queryParameters) +} + +// PublishTenants publishes a global role to one or more tenants, adding to tenants that may already been there +func (globalRole *GlobalRole) PublishTenants(tenants []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return publishContainerToTenants(globalRole.client, "GlobalRole", globalRole.GlobalRole.Name, globalRole.GlobalRole.Id, endpoint, tenants, "add") +} + +// ReplacePublishedTenants publishes a global role to one or more tenants, removing the tenants already present +func (globalRole *GlobalRole) ReplacePublishedTenants(tenants []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return publishContainerToTenants(globalRole.client, "GlobalRole", globalRole.GlobalRole.Name, globalRole.GlobalRole.Id, endpoint, tenants, "replace") +} + +// UnpublishTenants remove tenats from a global role +func (globalRole *GlobalRole) UnpublishTenants(tenants []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return publishContainerToTenants(globalRole.client, "GlobalRole", globalRole.GlobalRole.Name, globalRole.GlobalRole.Id, endpoint, tenants, "remove") +} + +// PublishAllTenants publishes a global role to all tenants +func (globalRole *GlobalRole) PublishAllTenants() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return publishContainerToAllTenants(globalRole.client, "GlobalRole", globalRole.GlobalRole.Name, globalRole.GlobalRole.Id, endpoint, true) +} + +// UnpublishAllTenants remove publication status of a global role from all tenants +func (globalRole *GlobalRole) UnpublishAllTenants() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles + return publishContainerToAllTenants(globalRole.client, "GlobalRole", globalRole.GlobalRole.Name, globalRole.GlobalRole.Id, endpoint, false) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/graph_plan.png b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/graph_plan.png new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/group.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/group.go new file mode 100644 index 000000000..263dd418f --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/group.go @@ -0,0 +1,185 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// OrgGroup defines group structure +type OrgGroup struct { + Group *types.Group + client *Client + AdminOrg *AdminOrg // needed to be able to update, as the list of roles is found in the Org +} + +// NewGroup creates a new group structure which still needs to have Group attribute populated +func NewGroup(cli *Client, org *AdminOrg) *OrgGroup { + return &OrgGroup{ + Group: new(types.Group), + client: cli, + AdminOrg: org, + } +} + +// CreateGroup creates a group in Org. Supported provider types are `OrgUserProviderIntegrated` and +// `OrgUserProviderSAML`. +// +// Note. This request will return HTTP 403 if Org is not configured for SAML or LDAP usage. +func (adminOrg *AdminOrg) CreateGroup(group *types.Group) (*OrgGroup, error) { + if err := validateCreateUpdateGroup(group); err != nil { + return nil, err + } + + groupCreateHREF, err := url.ParseRequestURI(adminOrg.AdminOrg.HREF) + if err != nil { + return nil, fmt.Errorf("error parsing admin org url: %s", err) + } + groupCreateHREF.Path += "/groups" + + grpgroup := NewGroup(adminOrg.client, adminOrg) + // Add default XML types + group.Xmlns = types.XMLNamespaceVCloud + group.Type = types.MimeAdminGroup + + _, err = adminOrg.client.ExecuteRequest(groupCreateHREF.String(), http.MethodPost, + types.MimeAdminGroup, "error creating group: %s", group, grpgroup.Group) + if err != nil { + return nil, err + } + + return grpgroup, nil +} + +// GetGroupByHref retrieves group by HREF +func (adminOrg *AdminOrg) GetGroupByHref(href string) (*OrgGroup, error) { + orgGroup := NewGroup(adminOrg.client, adminOrg) + + _, err := adminOrg.client.ExecuteRequest(href, http.MethodGet, + types.MimeAdminUser, "error getting group: %s", nil, orgGroup.Group) + + if err != nil { + return nil, err + } + return orgGroup, nil +} + +// GetGroupByName retrieves group by Name +func (adminOrg *AdminOrg) GetGroupByName(name string, refresh bool) (*OrgGroup, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + + for _, group := range adminOrg.AdminOrg.Groups.Group { + if group.Name == name { + return adminOrg.GetGroupByHref(group.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetGroupById retrieves group by Id +func (adminOrg *AdminOrg) GetGroupById(id string, refresh bool) (*OrgGroup, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + + for _, group := range adminOrg.AdminOrg.Groups.Group { + if equalIds(id, group.ID, group.HREF) { + return adminOrg.GetGroupByHref(group.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetGroupByNameOrId retrieves group by Name or Id. Id is prioritized for search +func (adminOrg *AdminOrg) GetGroupByNameOrId(identifier string, refresh bool) (*OrgGroup, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return adminOrg.GetGroupByName(name, refresh) } + getById := func(name string, refresh bool) (interface{}, error) { return adminOrg.GetGroupById(name, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*OrgGroup), err +} + +// Update allows to update group. 
vCD API allows to update only role +func (group *OrgGroup) Update() error { + util.Logger.Printf("[TRACE] Updating group: %s", group.Group.Name) + + if err := validateCreateUpdateGroup(group.Group); err != nil { + return err + } + + groupHREF, err := url.ParseRequestURI(group.Group.Href) + if err != nil { + return fmt.Errorf("error getting HREF for group %s : %s", group.Group.Href, err) + } + util.Logger.Printf("[TRACE] Url for updating group : %s and name: %s", groupHREF.String(), group.Group.Name) + + _, err = group.client.ExecuteRequest(groupHREF.String(), http.MethodPut, + types.MimeAdminGroup, "error updating group : %s", group.Group, nil) + return err +} + +// Delete removes a group +func (group *OrgGroup) Delete() error { + if err := validateDeleteGroup(group.Group); err != nil { + return err + } + + groupHREF, err := url.ParseRequestURI(group.Group.Href) + if err != nil { + return fmt.Errorf("error getting HREF for group %s : %s", group.Group.Name, err) + } + util.Logger.Printf("[TRACE] Url for deleting group : %s and name: %s", groupHREF, group.Group.Name) + + return group.client.ExecuteRequestWithoutResponse(groupHREF.String(), http.MethodDelete, + types.MimeAdminGroup, "error deleting group : %s", nil) +} + +// validateCreateGroup checks if mandatory fields are set for group creation and update +func validateCreateUpdateGroup(group *types.Group) error { + if group == nil { + return fmt.Errorf("group cannot be nil") + } + + if group.Name == "" { + return fmt.Errorf("group must have a name") + } + + if group.ProviderType == "" { + return fmt.Errorf("group must have provider type set") + } + + if group.Role.HREF == "" { + return fmt.Errorf("group role must have HREF set") + } + return nil +} + +// validateDeleteGroup checks if mandatory fields are set for delete +func validateDeleteGroup(group *types.Group) error { + if group == nil { + return fmt.Errorf("group cannot be nil") + } + + if group.Href == "" { + return fmt.Errorf("HREF must be set to delete group") + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lb.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lb.go new file mode 100644 index 000000000..0cfaca26c --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lb.go @@ -0,0 +1,35 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "path" + "strings" +) + +// extractNsxObjectIdFromPath parses proxied NSX API response Location header and +// extracts ID for newly created object from it. The object ID is the last element in path. +// It expects the path to have at least one "/" to be a valid path and cleans up the trailing slash +// if there is one. 
+// +// Sample locationPath from API: /network/edges/edge-3/loadbalancer/config/monitors/monitor-5 +// Expected ID to be returned: monitor-5 +func extractNsxObjectIdFromPath(locationPath string) (string, error) { + if locationPath == "" { + return "", fmt.Errorf("unable to get ID from empty path") + } + + cleanPath := path.Clean(locationPath) // Removes trailing slash if there is one + splitPath := strings.Split(cleanPath, "/") + + if len(splitPath) < 2 { + return "", fmt.Errorf("path does not contain url path: %s", splitPath) + } + + objectID := splitPath[len(splitPath)-1] + + return objectID, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbappprofile.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbappprofile.go new file mode 100644 index 000000000..8015cc518 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbappprofile.go @@ -0,0 +1,238 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// CreateLbAppProfile creates a load balancer application profile based on mandatory fields. It is a +// synchronous operation. It returns created object with all fields (including ID) populated or an error. +func (egw *EdgeGateway) CreateLbAppProfile(lbAppProfileConfig *types.LbAppProfile) (*types.LbAppProfile, error) { + if err := validateCreateLbAppProfile(lbAppProfileConfig, egw); err != nil { + return nil, err + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbAppProfilePath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + // We expect to get http.StatusCreated or if not an error of type types.NSXError + resp, err := egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPost, types.AnyXMLMime, + "error creating load balancer application profile: %s", lbAppProfileConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + // Location header should look similar to: + // [/network/edges/edge-3/loadbalancer/config/applicationprofiles/applicationProfile-4] + lbAppProfileID, err := extractNsxObjectIdFromPath(resp.Header.Get("Location")) + if err != nil { + return nil, err + } + + readAppProfile, err := egw.GetLbAppProfileById(lbAppProfileID) + if err != nil { + return nil, fmt.Errorf("unable to retrieve application profile with ID (%s) after creation: %s", + lbAppProfileID, err) + } + return readAppProfile, nil +} + +// GetLbAppProfiles returns a list of all LB application profiles in a given edge gateway +func (egw *EdgeGateway) GetLbAppProfiles() ([]*types.LbAppProfile, error) { + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbAppProfilePath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Anonymous struct to unwrap response + lbAppProfileResponse := &struct { + LbAppProfiles []*types.LbAppProfile `xml:"applicationProfile"` + }{} + + // This query returns all application profiles as the API does not have filtering options + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read load balancer application profile: %s", nil, lbAppProfileResponse) + if err != nil { + return nil, err + } + return lbAppProfileResponse.LbAppProfiles, nil +} + +// getLbAppProfile is able to find the types.LbAppProfile type by Name 
and/or ID. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. +func (egw *EdgeGateway) getLbAppProfile(lbAppProfileConfig *types.LbAppProfile) (*types.LbAppProfile, error) { + if err := validateGetLbAppProfile(lbAppProfileConfig, egw); err != nil { + return nil, err + } + + lbAppProfiles, err := egw.GetLbAppProfiles() + if err != nil { + return nil, err + } + + // Search for application profile by ID or by Name + for _, profile := range lbAppProfiles { + // If ID was specified for lookup - look for the same ID + if lbAppProfileConfig.ID != "" && profile.ID == lbAppProfileConfig.ID { + return profile, nil + } + + // If Name was specified for lookup - look for the same Name + if lbAppProfileConfig.Name != "" && profile.Name == lbAppProfileConfig.Name { + // We found it by name. Let's verify if search ID was specified and it matches the lookup object + if lbAppProfileConfig.ID != "" && profile.ID != lbAppProfileConfig.ID { + return nil, fmt.Errorf("load balancer application profile was found by name (%s)"+ + ", but its ID (%s) does not match specified ID (%s)", + profile.Name, profile.ID, lbAppProfileConfig.ID) + } + return profile, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetLbAppProfileById wraps getLbAppProfile and needs only an ID for lookup +func (egw *EdgeGateway) GetLbAppProfileById(id string) (*types.LbAppProfile, error) { + return egw.getLbAppProfile(&types.LbAppProfile{ID: id}) +} + +// GetLbAppProfileByName wraps getLbAppProfile and needs only a Name for lookup +func (egw *EdgeGateway) GetLbAppProfileByName(name string) (*types.LbAppProfile, error) { + return egw.getLbAppProfile(&types.LbAppProfile{Name: name}) +} + +// UpdateLbAppProfile updates types.LbAppProfile with all fields. At least name or ID must be specified. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. +func (egw *EdgeGateway) UpdateLbAppProfile(lbAppProfileConfig *types.LbAppProfile) (*types.LbAppProfile, error) { + err := validateUpdateLbAppProfile(lbAppProfileConfig, egw) + if err != nil { + return nil, err + } + + lbAppProfileConfig.ID, err = egw.getLbAppProfileIdByNameId(lbAppProfileConfig.Name, lbAppProfileConfig.ID) + if err != nil { + return nil, fmt.Errorf("cannot update load balancer application profile: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbAppProfilePath + lbAppProfileConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Result should be 204, if not we expect an error of type types.NSXError + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error while updating load balancer application profile : %s", lbAppProfileConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + readAppProfile, err := egw.GetLbAppProfileById(lbAppProfileConfig.ID) + if err != nil { + return nil, fmt.Errorf("unable to retrieve application profile with ID (%s) after update: %s", + lbAppProfileConfig.ID, err) + } + return readAppProfile, nil +} + +// DeleteLbAppProfile is able to delete the types.LbAppProfile type by Name and/or ID. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. 
+func (egw *EdgeGateway) DeleteLbAppProfile(lbAppProfileConfig *types.LbAppProfile) error { + err := validateDeleteLbAppProfile(lbAppProfileConfig, egw) + if err != nil { + return err + } + + lbAppProfileConfig.ID, err = egw.getLbAppProfileIdByNameId(lbAppProfileConfig.Name, lbAppProfileConfig.ID) + if err != nil { + return fmt.Errorf("cannot delete load balancer application profile: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbAppProfilePath + lbAppProfileConfig.ID) + if err != nil { + return fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodDelete, types.AnyXMLMime, + "unable to delete application profile: %s", nil, &types.NSXError{}) + if err != nil { + return err + } + + return nil +} + +// DeleteLbAppProfileById wraps DeleteLbAppProfile and requires only ID for deletion +func (egw *EdgeGateway) DeleteLbAppProfileById(id string) error { + return egw.DeleteLbAppProfile(&types.LbAppProfile{ID: id}) +} + +// DeleteLbAppProfileByName wraps DeleteLbAppProfile and requires only Name for deletion +func (egw *EdgeGateway) DeleteLbAppProfileByName(name string) error { + return egw.DeleteLbAppProfile(&types.LbAppProfile{Name: name}) +} + +func validateCreateLbAppProfile(lbAppProfileConfig *types.LbAppProfile, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbAppProfileConfig.Name == "" { + return fmt.Errorf("load balancer application profile Name cannot be empty") + } + + return nil +} + +func validateGetLbAppProfile(lbAppProfileConfig *types.LbAppProfile, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbAppProfileConfig.ID == "" && lbAppProfileConfig.Name == "" { + return fmt.Errorf("to read load balancer application profile at least one of `ID`, `Name`" + + " fields must be specified") + } + + return nil +} + +func validateUpdateLbAppProfile(lbAppProfileConfig *types.LbAppProfile, egw *EdgeGateway) error { + // Update and create have the same requirements for now + return validateCreateLbAppProfile(lbAppProfileConfig, egw) +} + +func validateDeleteLbAppProfile(lbAppProfileConfig *types.LbAppProfile, egw *EdgeGateway) error { + // Read and delete have the same requirements for now + return validateGetLbAppProfile(lbAppProfileConfig, egw) +} + +// getLbAppProfileIdByNameId checks if at least name or ID is set and returns the ID. +// If the ID is specified - it passes through the ID. If only name was specified +// it will lookup the object by name and return the ID. 
+func (egw *EdgeGateway) getLbAppProfileIdByNameId(name, id string) (string, error) { + if name == "" && id == "" { + return "", fmt.Errorf("at least Name or ID must be specific to find load balancer "+ + "application profile got name (%s) ID (%s)", name, id) + } + if id != "" { + return id, nil + } + + // if only name was specified, ID must be found, because only ID can be used in request path + readlbAppProfile, err := egw.GetLbAppProfileByName(name) + if err != nil { + return "", fmt.Errorf("unable to find load balancer application profile by name: %s", err) + } + return readlbAppProfile.ID, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbapprule.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbapprule.go new file mode 100644 index 000000000..b09d2b055 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbapprule.go @@ -0,0 +1,239 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// CreateLbAppRule creates a load balancer application rule based on mandatory fields. It is a +// synchronous operation. It returns created object with all fields (including ID) populated or an error. +func (egw *EdgeGateway) CreateLbAppRule(lbAppRuleConfig *types.LbAppRule) (*types.LbAppRule, error) { + if err := validateCreateLbAppRule(lbAppRuleConfig, egw); err != nil { + return nil, err + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbAppRulePath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + // We expect to get http.StatusCreated or if not an error of type types.NSXError + resp, err := egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPost, types.AnyXMLMime, + "error creating load balancer application rule: %s", lbAppRuleConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + // Location header should look similar to: + // [/network/edges/edge-3/loadbalancer/config/applicationrules/applicationRule-4] + lbAppRuleId, err := extractNsxObjectIdFromPath(resp.Header.Get("Location")) + if err != nil { + return nil, err + } + + readAppRule, err := egw.GetLbAppRuleById(lbAppRuleId) + if err != nil { + return nil, fmt.Errorf("unable to retrieve application rule with ID (%s) after creation: %s", + lbAppRuleId, err) + } + return readAppRule, nil +} + +// GetLbAppRules returns a list of all LB application rules for a given edge gateway +func (egw *EdgeGateway) GetLbAppRules() ([]*types.LbAppRule, error) { + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbAppRulePath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Anonymous struct to unwrap response + lbAppRuleResponse := &struct { + LbAppRules []*types.LbAppRule `xml:"applicationRule"` + }{} + + // This query returns all application rules as the API does not have filtering options + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read load balancer application rule: %s", nil, lbAppRuleResponse) + if err != nil { + return nil, err + } + return lbAppRuleResponse.LbAppRules, nil +} + +// getLbAppRule is able to find the types.LbAppRule type by Name and/or ID. 
+// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. +func (egw *EdgeGateway) getLbAppRule(lbAppRuleConfig *types.LbAppRule) (*types.LbAppRule, error) { + if err := validateGetLbAppRule(lbAppRuleConfig, egw); err != nil { + return nil, err + } + + lbAppRules, err := egw.GetLbAppRules() + if err != nil { + return nil, err + } + + // Search for application rule by ID or by Name + for _, rule := range lbAppRules { + // If ID was specified for lookup - look for the same ID + if lbAppRuleConfig.ID != "" && rule.ID == lbAppRuleConfig.ID { + return rule, nil + } + + // If Name was specified for lookup - look for the same Name + if lbAppRuleConfig.Name != "" && rule.Name == lbAppRuleConfig.Name { + // We found it by name. Let's verify if search ID was specified and it matches the lookup object + if lbAppRuleConfig.ID != "" && rule.ID != lbAppRuleConfig.ID { + return nil, fmt.Errorf("load balancer application rule was found by name (%s)"+ + ", but its ID (%s) does not match specified ID (%s)", + rule.Name, rule.ID, lbAppRuleConfig.ID) + } + return rule, nil + } + } + + return nil, ErrorEntityNotFound +} + +// ReadLBAppRuleById wraps getLbAppRule and needs only an ID for lookup +func (egw *EdgeGateway) GetLbAppRuleById(id string) (*types.LbAppRule, error) { + return egw.getLbAppRule(&types.LbAppRule{ID: id}) +} + +// GetLbAppRuleByName wraps getLbAppRule and needs only a Name for lookup +func (egw *EdgeGateway) GetLbAppRuleByName(name string) (*types.LbAppRule, error) { + return egw.getLbAppRule(&types.LbAppRule{Name: name}) +} + +// UpdateLbAppRule updates types.LbAppRule with all fields. At least name or ID must be specified. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. +func (egw *EdgeGateway) UpdateLbAppRule(lbAppRuleConfig *types.LbAppRule) (*types.LbAppRule, error) { + err := validateUpdateLbAppRule(lbAppRuleConfig, egw) + if err != nil { + return nil, err + } + + lbAppRuleConfig.ID, err = egw.getLbAppRuleIdByNameId(lbAppRuleConfig.Name, lbAppRuleConfig.ID) + if err != nil { + return nil, fmt.Errorf("cannot update load balancer application rule: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbAppRulePath + lbAppRuleConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Result should be 204, if not we expect an error of type types.NSXError + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error while updating load balancer application rule : %s", lbAppRuleConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + readAppRule, err := egw.getLbAppRule(&types.LbAppRule{ID: lbAppRuleConfig.ID}) + if err != nil { + return nil, fmt.Errorf("unable to retrieve application rule with ID (%s) after update: %s", + lbAppRuleConfig.ID, err) + } + return readAppRule, nil +} + +// DeleteLbAppRule is able to delete the types.LbAppRule type by Name and/or ID. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. 
+func (egw *EdgeGateway) DeleteLbAppRule(lbAppRuleConfig *types.LbAppRule) error { + err := validateDeleteLbAppRule(lbAppRuleConfig, egw) + if err != nil { + return err + } + + lbAppRuleConfig.ID, err = egw.getLbAppRuleIdByNameId(lbAppRuleConfig.Name, lbAppRuleConfig.ID) + if err != nil { + return fmt.Errorf("cannot update load balancer application rule: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbAppRulePath + lbAppRuleConfig.ID) + if err != nil { + return fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodDelete, types.AnyXMLMime, + "unable to delete application rule: %s", nil, &types.NSXError{}) + if err != nil { + return err + } + + return nil +} + +// DeleteLBAppRuleById wraps DeleteLbAppRule and requires only ID for deletion +func (egw *EdgeGateway) DeleteLbAppRuleById(id string) error { + return egw.DeleteLbAppRule(&types.LbAppRule{ID: id}) +} + +// DeleteLbAppRuleByName wraps DeleteLbAppRule and requires only Name for deletion +func (egw *EdgeGateway) DeleteLbAppRuleByName(name string) error { + return egw.DeleteLbAppRule(&types.LbAppRule{Name: name}) +} + +func validateCreateLbAppRule(lbAppRuleConfig *types.LbAppRule, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbAppRuleConfig.Name == "" { + return fmt.Errorf("load balancer application rule Name cannot be empty") + } + + return nil +} + +func validateGetLbAppRule(lbAppRuleConfig *types.LbAppRule, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbAppRuleConfig.ID == "" && lbAppRuleConfig.Name == "" { + return fmt.Errorf("to read load balancer application rule at least one of `ID`, `Name`" + + " fields must be specified") + } + + return nil +} + +func validateUpdateLbAppRule(lbAppRuleConfig *types.LbAppRule, egw *EdgeGateway) error { + // Update and create have the same requirements for now + return validateCreateLbAppRule(lbAppRuleConfig, egw) +} + +func validateDeleteLbAppRule(lbAppRuleConfig *types.LbAppRule, egw *EdgeGateway) error { + // Read and delete have the same requirements for now + return validateGetLbAppRule(lbAppRuleConfig, egw) +} + +// getLbAppRuleIdByNameId checks if at least name or ID is set and returns the ID. +// If the ID is specified - it passes through the ID. If only name was specified +// it will lookup the object by name and return the ID. 
+func (egw *EdgeGateway) getLbAppRuleIdByNameId(name, id string) (string, error) { + if name == "" && id == "" { + return "", fmt.Errorf("at least Name or ID must be specific to find load balancer "+ + "application rule got name (%s) ID (%s)", name, id) + } + if id != "" { + return id, nil + } + + // if only name was specified, ID must be found, because only ID can be used in request path + readlbAppRule, err := egw.GetLbAppRuleByName(name) + if err != nil { + return "", fmt.Errorf("unable to find load balancer application rule by name: %s", err) + } + return readlbAppRule.ID, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbserverpool.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbserverpool.go new file mode 100644 index 000000000..631736dfe --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbserverpool.go @@ -0,0 +1,247 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// CreateLbServerPool creates a load balancer server pool based on mandatory fields. It is a synchronous +// operation. It returns created object with all fields (including ID) populated or an error. +// Name and Algorithm fields must be populated. +func (egw *EdgeGateway) CreateLbServerPool(lbPoolConfig *types.LbPool) (*types.LbPool, error) { + if err := validateCreateLbServerPool(lbPoolConfig, egw); err != nil { + return nil, err + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbServerPoolPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + // We expect to get http.StatusCreated or if not an error of type types.NSXError + resp, err := egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPost, types.AnyXMLMime, + "error creating load balancer server pool: %s", lbPoolConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + // Location header should look similar to: + // Location: [/network/edges/edge-3/loadbalancer/config/pools/pool-7] + lbPoolID, err := extractNsxObjectIdFromPath(resp.Header.Get("Location")) + if err != nil { + return nil, err + } + + readPool, err := egw.GetLbServerPoolById(lbPoolID) + if err != nil { + return nil, fmt.Errorf("unable to retrieve lb server pool with ID (%s) after creation: %s", lbPoolID, err) + } + return readPool, nil +} + +// getLbServerPool is able to find the types.LbPool type by Name and/or ID. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. +func (egw *EdgeGateway) getLbServerPool(lbPoolConfig *types.LbPool) (*types.LbPool, error) { + if err := validateGetLbServerPool(lbPoolConfig, egw); err != nil { + return nil, err + } + + pools, err := egw.GetLbServerPools() + if err != nil { + return nil, err + } + + // Search for pool by ID or by Name + for _, pool := range pools { + // If ID was specified for lookup - look for the same ID + if lbPoolConfig.ID != "" && pool.ID == lbPoolConfig.ID { + return pool, nil + } + + // If Name was specified for lookup - look for the same Name + if lbPoolConfig.Name != "" && pool.Name == lbPoolConfig.Name { + // We found it by name. 
Let's verify if search ID was specified and it matches the lookup object + if lbPoolConfig.ID != "" && pool.ID != lbPoolConfig.ID { + return nil, fmt.Errorf("load balancer server pool was found by name (%s), but its ID (%s) does not match specified ID (%s)", + pool.Name, pool.ID, lbPoolConfig.ID) + } + return pool, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetLbServerPools return all created server pools without filtering. +func (egw *EdgeGateway) GetLbServerPools() ([]*types.LbPool, error) { + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbServerPoolPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Anonymous struct to unwrap "server pool response" + lbPoolResponse := &struct { + LBPools []*types.LbPool `xml:"pool"` + }{} + + // This query returns all server pools as the API does not have filtering options + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read load lalancer server pool: %s", nil, lbPoolResponse) + if err != nil { + return nil, err + } + + return lbPoolResponse.LBPools, nil +} + +// GetLbServerPoolByName wraps getLbServerPool and needs only an ID for lookup +func (egw *EdgeGateway) GetLbServerPoolById(id string) (*types.LbPool, error) { + return egw.getLbServerPool(&types.LbPool{ID: id}) +} + +// GetLbServerPoolByName wraps getLbServerPool and needs only a Name for lookup +func (egw *EdgeGateway) GetLbServerPoolByName(name string) (*types.LbPool, error) { + return egw.getLbServerPool(&types.LbPool{Name: name}) +} + +// UpdateLbServerPool updates types.LbPool with all fields. At least name or ID must be specified. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. +// Name and Algorithm fields must be populated. +func (egw *EdgeGateway) UpdateLbServerPool(lbPoolConfig *types.LbPool) (*types.LbPool, error) { + err := validateUpdateLbServerPool(lbPoolConfig, egw) + if err != nil { + return nil, err + } + + lbPoolConfig.ID, err = egw.getLbServerPoolIdByNameId(lbPoolConfig.Name, lbPoolConfig.ID) + if err != nil { + return nil, fmt.Errorf("cannot update load balancer server pool: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbServerPoolPath + lbPoolConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Result should be 204, if not we expect an error of type types.NSXError + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error while updating load balancer server pool : %s", lbPoolConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + readPool, err := egw.GetLbServerPoolById(lbPoolConfig.ID) + if err != nil { + return nil, fmt.Errorf("unable to retrieve server pool with ID (%s) after update: %s", lbPoolConfig.ID, err) + } + return readPool, nil +} + +// DeleteLbServerPool is able to delete the types.LbPool type by Name and/or ID. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. 
+func (egw *EdgeGateway) DeleteLbServerPool(lbPoolConfig *types.LbPool) error { + err := validateDeleteLbServerPool(lbPoolConfig, egw) + if err != nil { + return err + } + + lbPoolConfig.ID, err = egw.getLbServerPoolIdByNameId(lbPoolConfig.Name, lbPoolConfig.ID) + if err != nil { + return fmt.Errorf("cannot delete load balancer server pool: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbServerPoolPath + lbPoolConfig.ID) + if err != nil { + return fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodDelete, types.AnyXMLMime, + "unable to delete server pool: %s", nil, &types.NSXError{}) + if err != nil { + return err + } + + return nil +} + +// DeleteLbServerPoolById wraps DeleteLbServerPool and requires only ID for deletion +func (egw *EdgeGateway) DeleteLbServerPoolById(id string) error { + return egw.DeleteLbServerPool(&types.LbPool{ID: id}) +} + +// DeleteLbServerPoolByName wraps DeleteLbServerPool and requires only Name for deletion +func (egw *EdgeGateway) DeleteLbServerPoolByName(name string) error { + return egw.DeleteLbServerPool(&types.LbPool{Name: name}) +} + +func validateCreateLbServerPool(lbPoolConfig *types.LbPool, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbPoolConfig.Name == "" { + return fmt.Errorf("load balancer server pool Name cannot be empty") + } + + if lbPoolConfig.Algorithm == "" { + return fmt.Errorf("load balancer server pool Algorithm cannot be empty") + } + + for _, member := range lbPoolConfig.Members { + if member.Condition == "" { + return fmt.Errorf("load balancer server pool Member must have Condition set") + } + } + + return nil +} + +func validateGetLbServerPool(lbPoolConfig *types.LbPool, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbPoolConfig.ID == "" && lbPoolConfig.Name == "" { + return fmt.Errorf("to read load balancer server pool at least one of `ID`, `Name` fields must be specified") + } + + return nil +} + +func validateUpdateLbServerPool(lbPoolConfig *types.LbPool, egw *EdgeGateway) error { + // Update and create have the same requirements for now + return validateCreateLbServerPool(lbPoolConfig, egw) +} + +func validateDeleteLbServerPool(lbPoolConfig *types.LbPool, egw *EdgeGateway) error { + // Read and delete have the same requirements for now + return validateGetLbServerPool(lbPoolConfig, egw) +} + +// getLbServerPoolIdByNameId checks if at least name or ID is set and returns the ID. +// If the ID is specified - it passes through the ID. If only name was specified +// it will lookup the object by name and return the ID. 
+func (egw *EdgeGateway) getLbServerPoolIdByNameId(name, id string) (string, error) { + if name == "" && id == "" { + return "", fmt.Errorf("at least Name or ID must be specific to find load balancer "+ + "server pool got name (%s) ID (%s)", name, id) + } + if id != "" { + return id, nil + } + + // if only name was specified, ID must be found, because only ID can be used in request path + readlbServerPool, err := egw.GetLbServerPoolByName(name) + if err != nil { + return "", fmt.Errorf("unable to find load balancer server pool by name: %s", err) + } + return readlbServerPool.ID, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbservicemonitor.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbservicemonitor.go new file mode 100644 index 000000000..bf100e4f8 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbservicemonitor.go @@ -0,0 +1,254 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// CreateLbServiceMonitor creates a load balancer service monitor based on mandatory fields. It is a synchronous +// operation. It returns created object with all fields (including ID) populated or an error. +func (egw *EdgeGateway) CreateLbServiceMonitor(lbMonitorConfig *types.LbMonitor) (*types.LbMonitor, error) { + if err := validateCreateLbServiceMonitor(lbMonitorConfig, egw); err != nil { + return nil, err + } + + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("edge gateway does not have advanced networking enabled") + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbMonitorPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + // We expect to get http.StatusCreated or if not an error of type types.NSXError + resp, err := egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPost, types.AnyXMLMime, + "error creating load balancer service monitor: %s", lbMonitorConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + // Location header should look similar to: + // Location: [/network/edges/edge-3/loadbalancer/config/monitors/monitor-5] + lbMonitorID, err := extractNsxObjectIdFromPath(resp.Header.Get("Location")) + if err != nil { + return nil, err + } + + readMonitor, err := egw.GetLbServiceMonitorById(lbMonitorID) + if err != nil { + return nil, fmt.Errorf("unable to retrieve monitor with ID (%s) after creation: %s", lbMonitorID, err) + } + return readMonitor, nil +} + +// getLbServiceMonitor is able to find the types.LbMonitor type by Name and/or ID. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. 
+func (egw *EdgeGateway) getLbServiceMonitor(lbMonitorConfig *types.LbMonitor) (*types.LbMonitor, error) { + if err := validateGetLbServiceMonitor(lbMonitorConfig, egw); err != nil { + return nil, err + } + + serviceMonitors, err := egw.GetLbServiceMonitors() + if err != nil { + return nil, err + } + + // Search for monitor by ID or by Name + for _, monitor := range serviceMonitors { + // If ID was specified for lookup - look for the same ID + if lbMonitorConfig.ID != "" && monitor.ID == lbMonitorConfig.ID { + return monitor, nil + } + + // If Name was specified for lookup - look for the same Name + if lbMonitorConfig.Name != "" && monitor.Name == lbMonitorConfig.Name { + // We found it by name. Let's verify if search ID was specified and it matches the lookup object + if lbMonitorConfig.ID != "" && monitor.ID != lbMonitorConfig.ID { + return nil, fmt.Errorf("load balancer monitor was found by name (%s), but its ID (%s) does not match specified ID (%s)", + monitor.Name, monitor.ID, lbMonitorConfig.ID) + } + return monitor, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetLbServiceMonitors return all service monitors without filtering +func (egw *EdgeGateway) GetLbServiceMonitors() ([]*types.LbMonitor, error) { + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbMonitorPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Anonymous struct to unwrap "monitor response" + lbMonitorResponse := &struct { + LBMonitors []*types.LbMonitor `xml:"monitor"` + }{} + + // This query returns all service monitors as the API does not have filtering options + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, "unable to read Load Balancer monitor: %s", nil, lbMonitorResponse) + if err != nil { + return nil, err + } + + return lbMonitorResponse.LBMonitors, nil +} + +// GetLbServiceMonitorById wraps getLbServiceMonitor and needs only an ID for lookup +func (egw *EdgeGateway) GetLbServiceMonitorById(id string) (*types.LbMonitor, error) { + return egw.getLbServiceMonitor(&types.LbMonitor{ID: id}) +} + +// GetLbServiceMonitorByName wraps getLbServiceMonitor and needs only a Name for lookup +func (egw *EdgeGateway) GetLbServiceMonitorByName(name string) (*types.LbMonitor, error) { + return egw.getLbServiceMonitor(&types.LbMonitor{Name: name}) +} + +// UpdateLbServiceMonitor updates types.LbMonitor with all fields. At least name or ID must be specified. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. 
+func (egw *EdgeGateway) UpdateLbServiceMonitor(lbMonitorConfig *types.LbMonitor) (*types.LbMonitor, error) { + err := validateUpdateLbServiceMonitor(lbMonitorConfig, egw) + if err != nil { + return nil, err + } + + lbMonitorConfig.ID, err = egw.getLbServiceMonitorIdByNameId(lbMonitorConfig.Name, lbMonitorConfig.ID) + if err != nil { + return nil, fmt.Errorf("cannot update load balancer service monitor: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbMonitorPath + lbMonitorConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Result should be 204, if not we expect an error of type types.NSXError + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error while updating load balancer service monitor : %s", lbMonitorConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + readMonitor, err := egw.GetLbServiceMonitorById(lbMonitorConfig.ID) + if err != nil { + return nil, fmt.Errorf("unable to retrieve monitor with ID (%s) after update: %s", lbMonitorConfig.ID, err) + } + return readMonitor, nil +} + +// DeleteLbServiceMonitor is able to delete the types.LbMonitor type by Name and/or ID. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. +func (egw *EdgeGateway) DeleteLbServiceMonitor(lbMonitorConfig *types.LbMonitor) error { + err := validateDeleteLbServiceMonitor(lbMonitorConfig, egw) + if err != nil { + return err + } + + lbMonitorConfig.ID, err = egw.getLbServiceMonitorIdByNameId(lbMonitorConfig.Name, lbMonitorConfig.ID) + if err != nil { + return fmt.Errorf("cannot delete load balancer service monitor: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbMonitorPath + lbMonitorConfig.ID) + if err != nil { + return fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodDelete, types.AnyXMLMime, + "unable to delete service monitor: %s", nil, &types.NSXError{}) + if err != nil { + return err + } + + return nil +} + +// DeleteLbServiceMonitorById wraps DeleteLbServiceMonitor and requires only ID for deletion +func (egw *EdgeGateway) DeleteLbServiceMonitorById(id string) error { + return egw.DeleteLbServiceMonitor(&types.LbMonitor{ID: id}) +} + +// DeleteLbServiceMonitorByName wraps DeleteLbServiceMonitor and requires only Name for deletion +func (egw *EdgeGateway) DeleteLbServiceMonitorByName(name string) error { + return egw.DeleteLbServiceMonitor(&types.LbMonitor{Name: name}) +} + +func validateCreateLbServiceMonitor(lbMonitorConfig *types.LbMonitor, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbMonitorConfig.Name == "" { + return fmt.Errorf("load balancer monitor Name cannot be empty") + } + + if lbMonitorConfig.Timeout == 0 { + return fmt.Errorf("load balancer monitor Timeout cannot be 0") + } + + if lbMonitorConfig.Interval == 0 { + return fmt.Errorf("load balancer monitor Interval cannot be 0") + } + + if lbMonitorConfig.MaxRetries == 0 { + return fmt.Errorf("load balancer monitor MaxRetries cannot be 0") + } + + if lbMonitorConfig.Type == "" { + return fmt.Errorf("load balancer monitor Type cannot be empty") + } + + return nil +} + +func validateGetLbServiceMonitor(lbMonitorConfig *types.LbMonitor, egw *EdgeGateway) error { + if 
!egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbMonitorConfig.ID == "" && lbMonitorConfig.Name == "" { + return fmt.Errorf("to read load balancer service monitor at least one of `ID`, `Name` fields must be specified") + } + + return nil +} + +func validateUpdateLbServiceMonitor(lbMonitorConfig *types.LbMonitor, egw *EdgeGateway) error { + // Update and create have the same requirements for now + return validateCreateLbServiceMonitor(lbMonitorConfig, egw) +} + +func validateDeleteLbServiceMonitor(lbMonitorConfig *types.LbMonitor, egw *EdgeGateway) error { + // Read and delete have the same requirements for now + return validateGetLbServiceMonitor(lbMonitorConfig, egw) +} + +// getLbServiceMonitorIdByNameId checks if at least name or ID is set and returns the ID. +// If the ID is specified - it passes through the ID. If only name was specified +// it will lookup the object by name and return the ID. +func (egw *EdgeGateway) getLbServiceMonitorIdByNameId(name, id string) (string, error) { + if name == "" && id == "" { + return "", fmt.Errorf("at least Name or ID must be specific to find load balancer "+ + "service monitor got name (%s) ID (%s)", name, id) + } + if id != "" { + return id, nil + } + + // if only name was specified, ID must be found, because only ID can be used in request path + readlbServiceMonitor, err := egw.GetLbServiceMonitorByName(name) + if err != nil { + return "", fmt.Errorf("unable to find load balancer service monitor by name: %s", err) + } + return readlbServiceMonitor.ID, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbvirtualserver.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbvirtualserver.go new file mode 100644 index 000000000..e12115a9d --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/lbvirtualserver.go @@ -0,0 +1,254 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// CreateLbVirtualServer creates a load balancer virtual server based on mandatory fields. It is a +// synchronous operation. It returns created object with all fields (including ID) populated +// or an error. 
+// Name, Protocol, Port and IpAddress fields must be populated +func (egw *EdgeGateway) CreateLbVirtualServer(lbVirtualServerConfig *types.LbVirtualServer) (*types.LbVirtualServer, error) { + if err := validateCreateLbVirtualServer(lbVirtualServerConfig, egw); err != nil { + return nil, err + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbVirtualServerPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + // We expect to get http.StatusCreated or if not an error of type types.NSXError + resp, err := egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPost, types.AnyXMLMime, + "error creating load balancer virtual server: %s", lbVirtualServerConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + // Location header should look similar to: + // Location: [/network/edges/edge-3/loadbalancer/config/virtualservers/virtualServer-10] + lbVirtualServerId, err := extractNsxObjectIdFromPath(resp.Header.Get("Location")) + if err != nil { + return nil, err + } + + readVirtualServer, err := egw.GetLbVirtualServerById(lbVirtualServerId) + if err != nil { + return nil, fmt.Errorf("unable to retrieve load balancer virtual server with ID (%s) after creation: %s", + lbVirtualServerId, err) + } + return readVirtualServer, nil +} + +// getLbVirtualServer is able to find the types.LbVirtualServer type by Name and/or ID. +// If both - Name and ID are specified it performs a lookup by ID and returns an error if the specified name and found +// name do not match. +func (egw *EdgeGateway) getLbVirtualServer(lbVirtualServerConfig *types.LbVirtualServer) (*types.LbVirtualServer, error) { + if err := validateGetLbVirtualServer(lbVirtualServerConfig, egw); err != nil { + return nil, err + } + + vs, err := egw.GetLbVirtualServers() + if err != nil { + return nil, err + } + + // Search for virtual server by ID or by Name + for _, virtualServer := range vs { + // If ID was specified for lookup - look for the same ID + if lbVirtualServerConfig.ID != "" && virtualServer.ID == lbVirtualServerConfig.ID { + return virtualServer, nil + } + + // If Name was specified for lookup - look for the same Name + if lbVirtualServerConfig.Name != "" && virtualServer.Name == lbVirtualServerConfig.Name { + // We found it by name. 
Let's verify if search ID was specified and it matches the lookup object + if lbVirtualServerConfig.ID != "" && virtualServer.ID != lbVirtualServerConfig.ID { + return nil, fmt.Errorf("load balancer virtual server was found by name (%s), "+ + "but its ID (%s) does not match specified ID (%s)", + virtualServer.Name, virtualServer.ID, lbVirtualServerConfig.ID) + } + return virtualServer, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetLbVirtualServers is getting all virtual servers without filtering anything +func (egw *EdgeGateway) GetLbVirtualServers() ([]*types.LbVirtualServer, error) { + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbVirtualServerPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Anonymous struct to unwrap "virtual server response" + lbVirtualServerResponse := &struct { + LBVirtualServers []*types.LbVirtualServer `xml:"virtualServer"` + }{} + + // This query returns all virtual servers as the API does not have filtering options + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read load balancer virtual server: %s", nil, lbVirtualServerResponse) + if err != nil { + return nil, err + } + + return lbVirtualServerResponse.LBVirtualServers, nil +} + +// GetLbVirtualServerById wraps getLbVirtualServers and needs only an ID for lookup +func (egw *EdgeGateway) GetLbVirtualServerById(id string) (*types.LbVirtualServer, error) { + return egw.getLbVirtualServer(&types.LbVirtualServer{ID: id}) +} + +// GetLbVirtualServerByName wraps getLbVirtualServers and needs only a Name for lookup +func (egw *EdgeGateway) GetLbVirtualServerByName(name string) (*types.LbVirtualServer, error) { + return egw.getLbVirtualServer(&types.LbVirtualServer{Name: name}) +} + +// UpdateLbVirtualServer updates types.LbVirtualServer with all fields. At least name or ID must be +// specified. If both - Name and ID are specified it performs a lookup by ID and returns an error if +// the specified name and found name do not match. +// Name, Protocol, Port and IpAddress fields must be populated +func (egw *EdgeGateway) UpdateLbVirtualServer(lbVirtualServerConfig *types.LbVirtualServer) (*types.LbVirtualServer, error) { + err := validateUpdateLbVirtualServer(lbVirtualServerConfig, egw) + if err != nil { + return nil, err + } + + lbVirtualServerConfig.ID, err = egw.getLbVirtualServerIdByNameId(lbVirtualServerConfig.Name, lbVirtualServerConfig.ID) + if err != nil { + return nil, fmt.Errorf("cannot update load balancer virtual server: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbVirtualServerPath + lbVirtualServerConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Result should be 204, if not we expect an error of type types.NSXError + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error while updating load balancer virtual server : %s", lbVirtualServerConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + readVirtualServer, err := egw.GetLbVirtualServerById(lbVirtualServerConfig.ID) + if err != nil { + return nil, fmt.Errorf("unable to retrieve virtual server with ID (%s) after update: %s", + lbVirtualServerConfig.ID, err) + } + return readVirtualServer, nil +} + +// DeleteLbVirtualServer is able to delete the types.LbVirtualServer type by Name and/or ID. 
+// If both - Name and ID are specified it performs a lookup by ID and returns an error if the +// specified name and found name do not match. +func (egw *EdgeGateway) DeleteLbVirtualServer(lbVirtualServerConfig *types.LbVirtualServer) error { + err := validateDeleteLbVirtualServer(lbVirtualServerConfig, egw) + if err != nil { + return err + } + + lbVirtualServerConfig.ID, err = egw.getLbVirtualServerIdByNameId(lbVirtualServerConfig.Name, lbVirtualServerConfig.ID) + if err != nil { + return fmt.Errorf("cannot delete load balancer virtual server: %s", err) + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.LbVirtualServerPath + lbVirtualServerConfig.ID) + if err != nil { + return fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodDelete, types.AnyXMLMime, + "unable to delete load balancer virtual server: %s", nil, &types.NSXError{}) + if err != nil { + return err + } + + return nil +} + +// DeleteLbVirtualServerById wraps DeleteLbVirtualServer and requires only ID for deletion +func (egw *EdgeGateway) DeleteLbVirtualServerById(id string) error { + return egw.DeleteLbVirtualServer(&types.LbVirtualServer{ID: id}) +} + +// DeleteLbVirtualServerByName wraps DeleteLbVirtualServer and requires only Name for deletion +func (egw *EdgeGateway) DeleteLbVirtualServerByName(name string) error { + return egw.DeleteLbVirtualServer(&types.LbVirtualServer{Name: name}) +} + +func validateCreateLbVirtualServer(lbVirtualServerConfig *types.LbVirtualServer, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbVirtualServerConfig.Name == "" { + return fmt.Errorf("load balancer virtual server Name cannot be empty") + } + + if lbVirtualServerConfig.IpAddress == "" { + return fmt.Errorf("load balancer virtual server IpAddress cannot be empty") + } + + if lbVirtualServerConfig.Protocol == "" { + return fmt.Errorf("load balancer virtual server Protocol cannot be empty") + } + + if lbVirtualServerConfig.Port == 0 { + return fmt.Errorf("load balancer virtual server Port cannot be empty") + } + + return nil +} + +func validateGetLbVirtualServer(lbVirtualServerConfig *types.LbVirtualServer, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support load balancers") + } + + if lbVirtualServerConfig.ID == "" && lbVirtualServerConfig.Name == "" { + return fmt.Errorf("to read load balancer virtual server at least one of `ID`, `Name` " + + "fields must be specified") + } + + return nil +} + +func validateUpdateLbVirtualServer(lbVirtualServerConfig *types.LbVirtualServer, egw *EdgeGateway) error { + // Update and create have the same requirements for now + return validateCreateLbVirtualServer(lbVirtualServerConfig, egw) +} + +func validateDeleteLbVirtualServer(lbVirtualServerConfig *types.LbVirtualServer, egw *EdgeGateway) error { + // Read and delete have the same requirements for now + return validateGetLbVirtualServer(lbVirtualServerConfig, egw) +} + +// getLbVirtualServerIdByNameId checks if at least name or ID is set and returns the ID. +// If the ID is specified - it passes through the ID. If only name was specified +// it will lookup the object by name and return the ID. 
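Illustrative aside: a minimal sketch of creating a virtual server with the four fields checked by `validateCreateLbVirtualServer`. The name, address and port are made up, and pool/application-profile wiring is deliberately omitted.

```go
package lbexample // illustrative only; not part of the vendored library

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
	"github.com/vmware/go-vcloud-director/v2/types/v56"
)

// createVirtualServer creates a minimal load balancer virtual server on the edge gateway.
func createVirtualServer(egw *govcd.EdgeGateway) (*types.LbVirtualServer, error) {
	vs := &types.LbVirtualServer{
		Name:      "web-vip",      // mandatory
		IpAddress: "203.0.113.10", // mandatory; an address available on the edge gateway
		Protocol:  "http",         // mandatory
		Port:      80,             // mandatory; must not be 0
	}
	created, err := egw.CreateLbVirtualServer(vs)
	if err != nil {
		return nil, fmt.Errorf("creating virtual server: %s", err)
	}
	fmt.Printf("created virtual server %s (ID %s)\n", created.Name, created.ID)
	return created, nil
}
```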
+func (egw *EdgeGateway) getLbVirtualServerIdByNameId(name, id string) (string, error) { + if name == "" && id == "" { + return "", fmt.Errorf("at least Name or ID must be specific to find load balancer "+ + "virtual server got name (%s) ID (%s)", name, id) + } + if id != "" { + return id, nil + } + + // if only name was specified, ID must be found, because only ID can be used in request path + readLbVirtualServer, err := egw.GetLbVirtualServerByName(name) + if err != nil { + return "", fmt.Errorf("unable to find load balancer virtual server by name: %s", err) + } + return readLbVirtualServer.ID, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/media.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/media.go new file mode 100644 index 000000000..019f4b477 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/media.go @@ -0,0 +1,697 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// Deprecated: use MediaRecord +type MediaItem struct { + MediaItem *types.MediaRecordType + vdc *Vdc +} + +// Deprecated: use NewMediaRecord +func NewMediaItem(vdc *Vdc) *MediaItem { + return &MediaItem{ + MediaItem: new(types.MediaRecordType), + vdc: vdc, + } +} + +type Media struct { + Media *types.Media + client *Client +} + +func NewMedia(cli *Client) *Media { + return &Media{ + Media: new(types.Media), + client: cli, + } +} + +type MediaRecord struct { + MediaRecord *types.MediaRecordType + client *Client +} + +func NewMediaRecord(cli *Client) *MediaRecord { + return &MediaRecord{ + MediaRecord: new(types.MediaRecordType), + client: cli, + } +} + +// Uploads an ISO file as media. This method only uploads bits to vCD spool area. +// Returns errors if any occur during upload from vCD or upload process. On upload fail client may need to +// remove vCD catalog item which waits for files to be uploaded. +// +// Deprecated: This method is broken in API V32.0+. Please use catalog.UploadMediaImage because VCD does not support +// uploading directly to VDC anymore. +func (vdc *Vdc) UploadMediaImage(mediaName, mediaDescription, filePath string, uploadPieceSize int64) (UploadTask, error) { + util.Logger.Printf("[TRACE] UploadImage: %s, image name: %v \n", mediaName, mediaDescription) + + // On a very high level the flow is as follows + // 1. Makes a POST call to vCD to create media item(also creates a transfer folder in the spool area and as result will give a media item resource XML). + // 2. Start uploading bits to the transfer folder + // 3. 
Wait on the import task to finish on vCD side -> task success = upload complete + + if *vdc == (Vdc{}) { + return UploadTask{}, errors.New("vdc can not be empty or nil") + } + + mediaFilePath, err := validateAndFixFilePath(filePath) + if err != nil { + return UploadTask{}, err + } + + isISOGood, err := verifyIso(mediaFilePath) + if err != nil || !isISOGood { + return UploadTask{}, fmt.Errorf("[ERROR] File %s isn't correct iso file: %s", mediaFilePath, err) + } + + mediaList, err := getExistingMedia(vdc) + if err != nil { + return UploadTask{}, fmt.Errorf("[ERROR] Checking existing media files failed: %s", err) + } + + for _, media := range mediaList { + if media.Name == mediaName { + return UploadTask{}, fmt.Errorf("media item '%s' already exists. Upload with different name", mediaName) + } + } + + file, e := os.Stat(mediaFilePath) + if e != nil { + return UploadTask{}, fmt.Errorf("[ERROR] Issue finding file: %#v", e) + } + fileSize := file.Size() + + media, err := createMedia(vdc.client, vdc.Vdc.HREF+"/media", mediaName, mediaDescription, fileSize) + if err != nil { + return UploadTask{}, fmt.Errorf("[ERROR] Issue creating media: %s", err) + } + + return executeUpload(vdc.client, media, mediaFilePath, mediaName, fileSize, uploadPieceSize) +} + +func executeUpload(client *Client, media *types.Media, mediaFilePath, mediaName string, fileSize, uploadPieceSize int64) (UploadTask, error) { + uploadLink, err := getUploadLink(media.Files) + if err != nil { + return UploadTask{}, fmt.Errorf("[ERROR] Issue getting upload link: %s", err) + } + + callBack, uploadProgress := getProgressCallBackFunction() + + uploadError := *new(error) + + details := uploadDetails{ + uploadLink: uploadLink.String(), // just take string + uploadedBytes: 0, + fileSizeToUpload: fileSize, + uploadPieceSize: uploadPieceSize, + uploadedBytesForCallback: 0, + allFilesSize: fileSize, + callBack: callBack, + uploadError: &uploadError, + } + + // sending upload process to background, this allows not to lock and return task to client + // The error should be captured in details.uploadError, but just in case, we add a logging for the + // main error + go func() { + _, err = uploadFile(client, mediaFilePath, details) + if err != nil { + util.Logger.Println(strings.Repeat("*", 80)) + util.Logger.Printf("*** [DEBUG - executeUpload] error calling uploadFile: %s\n", err) + util.Logger.Println(strings.Repeat("*", 80)) + } + }() + + var task Task + for _, item := range media.Tasks.Task { + task, err = createTaskForVcdImport(client, item.HREF) + if err != nil { + removeImageOnError(client, media, mediaName) + return UploadTask{}, err + } + if task.Task.Status == "error" { + removeImageOnError(client, media, mediaName) + return UploadTask{}, fmt.Errorf("task did not complete succesfully: %s", task.Task.Description) + } + } + + uploadTask := NewUploadTask(&task, uploadProgress, &uploadError) + + util.Logger.Printf("[TRACE] Upload media function finished and task for vcd import created. \n") + + return *uploadTask, nil +} + +// Initiates creation of media item and returns temporary upload URL. 
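Illustrative aside: because `Vdc.UploadMediaImage` above is deprecated in favour of the catalog-based upload, a sketch of the catalog flow. It assumes `Catalog.UploadMediaImage` shares the `(mediaName, mediaDescription, filePath, uploadPieceSize)` signature shown here and that the returned `UploadTask` can be waited on like a regular task; the media name and ISO path are hypothetical.

```go
package mediaexample // illustrative only; not part of the vendored library

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// uploadISO pushes a local ISO into a catalog and waits for the vCD import task.
func uploadISO(catalog *govcd.Catalog, isoPath string) error {
	// 1 MB pieces; the bits themselves are streamed from a background goroutine
	// (see executeUpload above), while the returned task tracks the vCD-side import.
	uploadTask, err := catalog.UploadMediaImage("ubuntu-22.04", "Ubuntu 22.04 installer ISO", isoPath, 1024*1024)
	if err != nil {
		return fmt.Errorf("starting media upload: %s", err)
	}

	// Wait for the import task; upload errors surface through the task as well.
	if err := uploadTask.WaitTaskCompletion(); err != nil {
		return fmt.Errorf("waiting for media import: %s", err)
	}
	fmt.Println("media upload finished")
	return nil
}
```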
+func createMedia(client *Client, link, mediaName, mediaDescription string, fileSize int64) (*types.Media, error) { + uploadUrl, err := url.ParseRequestURI(link) + if err != nil { + return nil, fmt.Errorf("error getting vdc href: %s", err) + } + + reqBody := bytes.NewBufferString( + "" + + "" + mediaDescription + "" + + "") + + request := client.NewRequest(map[string]string{}, http.MethodPost, *uploadUrl, reqBody) + request.Header.Add("Content-Type", "application/vnd.vmware.vcloud.media+xml") + + response, err := checkResp(client.Http.Do(request)) + if err != nil { + return nil, err + } + defer response.Body.Close() + + mediaForUpload := &types.Media{} + if err = decodeBody(types.BodyTypeXML, response, mediaForUpload); err != nil { + return nil, err + } + + util.Logger.Printf("[TRACE] Media item parsed: %#v\n", mediaForUpload) + + if mediaForUpload.Tasks != nil { + for _, task := range mediaForUpload.Tasks.Task { + if task.Status == "error" && mediaName == mediaForUpload.Name { + util.Logger.Printf("[Error] issue with creating media %#v", task.Error) + return nil, fmt.Errorf("error in vcd returned error code: %d, error: %s and message: %s ", task.Error.MajorErrorCode, task.Error.MinorErrorCode, task.Error.Message) + } + } + } + + return mediaForUpload, nil +} + +func removeImageOnError(client *Client, media *types.Media, itemName string) { + if media != nil { + util.Logger.Printf("[TRACE] Deleting media item %#v", media) + + // wait for task, cancel it and media item will be removed. + var err error + for { + util.Logger.Printf("[TRACE] Sleep... for 5 seconds.\n") + time.Sleep(time.Second * 5) + media, err = queryMedia(client, media.HREF, itemName) + if err != nil { + util.Logger.Printf("[Error] Error deleting media item %v: %s", media, err) + } + if len(media.Tasks.Task) > 0 { + util.Logger.Printf("[TRACE] Task found. Will try to cancel.\n") + break + } + } + + for _, taskItem := range media.Tasks.Task { + if itemName == taskItem.Owner.Name { + task := NewTask(client) + task.Task = taskItem + err = task.CancelTask() + if err != nil { + util.Logger.Printf("[ERROR] Error canceling task for media upload %s", err) + } + } + } + } else { + util.Logger.Printf("[Error] Failed to delete media item created with error: %v", media) + } +} + +func queryMedia(client *Client, mediaUrl string, newItemName string) (*types.Media, error) { + util.Logger.Printf("[TRACE] Querying media: %s\n", mediaUrl) + + mediaParsed := &types.Media{} + + _, err := client.ExecuteRequest(mediaUrl, http.MethodGet, + "", "error quering media: %s", nil, mediaParsed) + if err != nil { + return nil, err + } + + for _, task := range mediaParsed.Tasks.Task { + if task.Status == "error" && newItemName == task.Owner.Name { + util.Logger.Printf("[Error] %#v", task.Error) + return mediaParsed, fmt.Errorf("error in vcd returned error code: %d, error: %s and message: %s ", task.Error.MajorErrorCode, task.Error.MinorErrorCode, task.Error.Message) + } + } + + return mediaParsed, nil +} + +// Verifies provided file header matches standard +func verifyIso(filePath string) (bool, error) { + // #nosec G304 - linter does not like 'filePath' to be a variable. However this is necessary for file uploads. 
+ file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + return readHeader(file) +} + +func readHeader(reader io.Reader) (bool, error) { + buffer := make([]byte, 37000) + + _, err := reader.Read(buffer) + if err != nil && err != io.EOF { + return false, err + } + + headerOk := verifyHeader(buffer) + + if headerOk { + return true, nil + } else { + return false, errors.New("file header didn't match ISO standard") + } +} + +// Verify file header info: https://www.garykessler.net/library/file_sigs.html +func verifyHeader(buf []byte) bool { + // search for CD001(43 44 30 30 31) in specific file places. + //This signature usually occurs at byte offset 32769 (0x8001), + //34817 (0x8801), or 36865 (0x9001). + return (buf[32769] == 0x43 && buf[32770] == 0x44 && + buf[32771] == 0x30 && buf[32772] == 0x30 && buf[32773] == 0x31) || + (buf[34817] == 0x43 && buf[34818] == 0x44 && + buf[34819] == 0x30 && buf[34820] == 0x30 && buf[34821] == 0x31) || + (buf[36865] == 0x43 && buf[36866] == 0x44 && + buf[36867] == 0x30 && buf[36868] == 0x30 && buf[36869] == 0x31) +} + +// Reference for API usage http://pubs.vmware.com/vcloud-api-1-5/wwhelp/wwhimpl/js/html/wwhelp.htm#href=api_prog/GUID-9356B99B-E414-474A-853C-1411692AF84C.html +// http://pubs.vmware.com/vcloud-api-1-5/wwhelp/wwhimpl/js/html/wwhelp.htm#href=api_prog/GUID-43DFF30E-391F-42DC-87B3-5923ABCEB366.html +func getExistingMedia(vdc *Vdc) ([]*types.MediaRecordType, error) { + util.Logger.Printf("[TRACE] Querying medias \n") + + mediaResults, err := queryMediaWithFilter(vdc, "vdc=="+url.QueryEscape(vdc.Vdc.HREF)) + + util.Logger.Printf("[TRACE] Found media records: %d \n", len(mediaResults)) + return mediaResults, err +} + +func queryMediaWithFilter(vdc *Vdc, filter string) ([]*types.MediaRecordType, error) { + typeMedia := "media" + if vdc.client.IsSysAdmin { + typeMedia = "adminMedia" + } + + results, err := vdc.QueryWithNotEncodedParams(nil, map[string]string{"type": typeMedia, "filter": filter, "filterEncoded": "true"}) + if err != nil { + return nil, fmt.Errorf("error querying medias %s", err) + } + + mediaResults := results.Results.MediaRecord + if vdc.client.IsSysAdmin { + mediaResults = results.Results.AdminMediaRecord + } + return mediaResults, nil +} + +// Looks for media and, if found, will delete it. +// Deprecated: Use catalog.RemoveMediaIfExist +func RemoveMediaImageIfExists(vdc Vdc, mediaName string) error { + mediaItem, err := vdc.FindMediaImage(mediaName) + if err == nil && mediaItem != (MediaItem{}) { + task, err := mediaItem.Delete() + if err != nil { + return fmt.Errorf("error deleting media [phase 1] %s", mediaName) + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error deleting media [task] %s", mediaName) + } + } else { + util.Logger.Printf("[TRACE] Media not found or error: %s - %#v \n", err, mediaItem) + } + return nil +} + +// Looks for media and, if found, will delete it. +func (adminCatalog *AdminCatalog) RemoveMediaIfExists(mediaName string) error { + media, err := adminCatalog.GetMediaByName(mediaName, true) + if err == nil { + task, err := media.Delete() + if err != nil { + return fmt.Errorf("error deleting media [phase 1] %s", mediaName) + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error deleting media [task] %s", mediaName) + } + } else { + util.Logger.Printf("[TRACE] Media not found or error: %s - %#v \n", err, media) + } + return nil +} + +// Deletes the Media Item, returning an error if the vCD call fails. 
+// Link to API call: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/DELETE-Media.html +// Deprecated: Use MediaRecord.Delete +func (mediaItem *MediaItem) Delete() (Task, error) { + util.Logger.Printf("[TRACE] Deleting media item: %#v", mediaItem.MediaItem.Name) + + // Return the task + return mediaItem.vdc.client.ExecuteTaskRequest(mediaItem.MediaItem.HREF, http.MethodDelete, + "", "error deleting Media item: %s", nil) +} + +// Deletes the Media Item, returning an error if the vCD call fails. +// Link to API call: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/DELETE-Media.html +func (media *Media) Delete() (Task, error) { + util.Logger.Printf("[TRACE] Deleting media item: %#v", media.Media.Name) + + // Return the task + return media.client.ExecuteTaskRequest(media.Media.HREF, http.MethodDelete, + "", "error deleting Media item: %s", nil) +} + +// Finds media in catalog and returns catalog item +// Deprecated: Use catalog.GetMediaByName() +func FindMediaAsCatalogItem(org *Org, catalogName, mediaName string) (CatalogItem, error) { + if catalogName == "" { + return CatalogItem{}, errors.New("catalog name is empty") + } + if mediaName == "" { + return CatalogItem{}, errors.New("media name is empty") + } + + catalog, err := org.FindCatalog(catalogName) + if err != nil || catalog == (Catalog{}) { + return CatalogItem{}, fmt.Errorf("catalog not found or error %s", err) + } + + media, err := catalog.FindCatalogItem(mediaName) + if err != nil || media == (CatalogItem{}) { + return CatalogItem{}, fmt.Errorf("media not found or error %s", err) + } + return media, nil +} + +// Refresh refreshes the media item information by href +// Deprecated: Use MediaRecord.Refresh +func (mediaItem *MediaItem) Refresh() error { + + if mediaItem.MediaItem == nil { + return fmt.Errorf("cannot refresh, Object is empty") + } + + if mediaItem.MediaItem.Name == "nil" { + return fmt.Errorf("cannot refresh, Name is empty") + } + + latestMediaItem, err := mediaItem.vdc.FindMediaImage(mediaItem.MediaItem.Name) + *mediaItem = latestMediaItem + + return err +} + +// Refresh refreshes the media information by href +func (media *Media) Refresh() error { + + if media.Media == nil { + return fmt.Errorf("cannot refresh, Object is empty") + } + + url := media.Media.HREF + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. 
+ media.Media = &types.Media{} + + _, err := media.client.ExecuteRequest(url, http.MethodGet, + "", "error retrieving media: %s", nil, media.Media) + + return err +} + +// GetMediaByHref finds a Media by HREF +// On success, returns a pointer to the Media structure and a nil error +// On failure, returns a nil pointer and an error +func (cat *Catalog) GetMediaByHref(mediaHref string) (*Media, error) { + + media := NewMedia(cat.client) + + _, err := cat.client.ExecuteRequest(mediaHref, http.MethodGet, + "", "error retrieving media: %#v", nil, media.Media) + if err != nil && strings.Contains(err.Error(), "MajorErrorCode:403") { + return nil, ErrorEntityNotFound + } + if err != nil { + return nil, err + } + return media, nil +} + +// GetMediaByName finds a Media by Name +// On success, returns a pointer to the Media structure and a nil error +// On failure, returns a nil pointer and an error +func (cat *Catalog) GetMediaByName(mediaName string, refresh bool) (*Media, error) { + if refresh { + err := cat.Refresh() + if err != nil { + return nil, err + } + } + for _, catalogItems := range cat.Catalog.CatalogItems { + for _, catalogItem := range catalogItems.CatalogItem { + if catalogItem.Name == mediaName && catalogItem.Type == "application/vnd.vmware.vcloud.catalogItem+xml" { + catalogItemElement, err := cat.GetCatalogItemByHref(catalogItem.HREF) + if err != nil { + return nil, err + } + return cat.GetMediaByHref(catalogItemElement.CatalogItem.Entity.HREF) + } + } + } + return nil, ErrorEntityNotFound +} + +// GetMediaById finds a Media by ID +// On success, returns a pointer to the Media structure and a nil error +// On failure, returns a nil pointer and an error +func (catalog *Catalog) GetMediaById(mediaId string) (*Media, error) { + typeMedia := "media" + if catalog.client.IsSysAdmin { + typeMedia = "adminMedia" + } + + results, err := catalog.client.QueryWithNotEncodedParams(nil, map[string]string{"type": typeMedia, + "filter": fmt.Sprintf("catalogName==%s", url.QueryEscape(catalog.Catalog.Name)), + "filterEncoded": "true"}) + if err != nil { + return nil, fmt.Errorf("error querying medias %s", err) + } + + mediaResults := results.Results.MediaRecord + if catalog.client.IsSysAdmin { + mediaResults = results.Results.AdminMediaRecord + } + for _, mediaRecord := range mediaResults { + if equalIds(mediaId, mediaRecord.ID, mediaRecord.HREF) { + return catalog.GetMediaByHref(mediaRecord.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetMediaByNameOrId finds a Media by Name or ID +// On success, returns a pointer to the Media structure and a nil error +// On failure, returns a nil pointer and an error +func (cat *Catalog) GetMediaByNameOrId(identifier string, refresh bool) (*Media, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return cat.GetMediaByName(name, refresh) } + getById := func(id string, refresh bool) (interface{}, error) { return cat.GetMediaById(id) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*Media), err +} + +// GetMediaByHref finds a Media by HREF +// On success, returns a pointer to the Media structure and a nil error +// On failure, returns a nil pointer and an error +func (adminCatalog *AdminCatalog) GetMediaByHref(mediaHref string) (*Media, error) { + catalog := NewCatalog(adminCatalog.client) + catalog.Catalog = &adminCatalog.AdminCatalog.Catalog + catalog.parent = adminCatalog.parent + return catalog.GetMediaByHref(mediaHref) +} + +// 
GetMediaByName finds a Media by Name +// On success, returns a pointer to the Media structure and a nil error +// On failure, returns a nil pointer and an error +func (adminCatalog *AdminCatalog) GetMediaByName(mediaName string, refresh bool) (*Media, error) { + catalog := NewCatalog(adminCatalog.client) + catalog.Catalog = &adminCatalog.AdminCatalog.Catalog + catalog.parent = adminCatalog.parent + return catalog.GetMediaByName(mediaName, refresh) +} + +// GetMediaById finds a Media by ID +// On success, returns a pointer to the Media structure and a nil error +// On failure, returns a nil pointer and an error +func (adminCatalog *AdminCatalog) GetMediaById(mediaId string) (*Media, error) { + catalog := NewCatalog(adminCatalog.client) + catalog.Catalog = &adminCatalog.AdminCatalog.Catalog + catalog.parent = adminCatalog.parent + return catalog.GetMediaById(mediaId) +} + +// GetMediaByNameOrId finds a Media by Name or ID +// On success, returns a pointer to the Media structure and a nil error +// On failure, returns a nil pointer and an error +func (adminCatalog *AdminCatalog) GetMediaByNameOrId(identifier string, refresh bool) (*Media, error) { + catalog := NewCatalog(adminCatalog.client) + catalog.Catalog = &adminCatalog.AdminCatalog.Catalog + catalog.parent = adminCatalog.parent + return catalog.GetMediaByNameOrId(identifier, refresh) +} + +// QueryMedia returns media image found in system using `name` and `catalog name` as query. +func (catalog *Catalog) QueryMedia(mediaName string) (*MediaRecord, error) { + util.Logger.Printf("[TRACE] Querying medias by name and catalog\n") + + if catalog == nil || catalog.Catalog == nil || catalog.Catalog.Name == "" { + return nil, errors.New("catalog is empty") + } + if mediaName == "" { + return nil, errors.New("media name is empty") + } + + typeMedia := "media" + if catalog.client.IsSysAdmin { + typeMedia = "adminMedia" + } + + results, err := catalog.client.QueryWithNotEncodedParams(nil, map[string]string{"type": typeMedia, + "filter": fmt.Sprintf("name==%s;catalogName==%s", + url.QueryEscape(mediaName), + url.QueryEscape(catalog.Catalog.Name)), + "filterEncoded": "true"}) + if err != nil { + return nil, fmt.Errorf("error querying medias %s", err) + } + newMediaRecord := NewMediaRecord(catalog.client) + + mediaResults := results.Results.MediaRecord + if catalog.client.IsSysAdmin { + mediaResults = results.Results.AdminMediaRecord + } + if len(mediaResults) == 1 { + newMediaRecord.MediaRecord = mediaResults[0] + } + + if len(mediaResults) == 0 { + return nil, ErrorEntityNotFound + } + // this shouldn't happen, but we will check anyways + if len(mediaResults) > 1 { + return nil, fmt.Errorf("found more than one result %#v with catalog name %s and media name %s ", mediaResults, catalog.Catalog.Name, mediaName) + } + + util.Logger.Printf("[TRACE] Found media record by name: %#v \n", mediaResults[0]) + return newMediaRecord, nil +} + +// QueryMedia returns media image found in system using `name` and `catalog name` as query. 
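Illustrative aside: a short sketch of the two lookup paths defined above, fetching the same image once as a full `Media` entity and once as a lightweight query record. The media name is hypothetical.

```go
package mediaexample // illustrative only; not part of the vendored library

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// findMedia looks an image up via GetMediaByName and via QueryMedia.
func findMedia(catalog *govcd.Catalog, name string) error {
	// Full entity lookup (refresh=true re-reads the catalog so new items are visible).
	media, err := catalog.GetMediaByName(name, true)
	if err != nil {
		return fmt.Errorf("GetMediaByName(%s): %s", name, err)
	}
	fmt.Printf("media HREF: %s\n", media.Media.HREF)

	// Query-record lookup; returns ErrorEntityNotFound when nothing matches.
	record, err := catalog.QueryMedia(name)
	if err != nil {
		return fmt.Errorf("QueryMedia(%s): %s", name, err)
	}
	fmt.Printf("media record name: %s\n", record.MediaRecord.Name)
	return nil
}
```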
+func (adminCatalog *AdminCatalog) QueryMedia(mediaName string) (*MediaRecord, error) { + catalog := NewCatalog(adminCatalog.client) + catalog.Catalog = &adminCatalog.AdminCatalog.Catalog + catalog.parent = adminCatalog.parent + return catalog.QueryMedia(mediaName) +} + +// Refresh refreshes the media information by href +func (mediaRecord *MediaRecord) Refresh() error { + + if mediaRecord.MediaRecord == nil { + return fmt.Errorf("cannot refresh, Object is empty") + } + + if mediaRecord.MediaRecord.Name == "" { + return fmt.Errorf("cannot refresh, Name is empty") + } + + url := mediaRecord.MediaRecord.HREF + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. + mediaRecord.MediaRecord = &types.MediaRecordType{} + + _, err := mediaRecord.client.ExecuteRequest(url, http.MethodGet, + "", "error retrieving media: %s", nil, mediaRecord.MediaRecord) + + return err +} + +// Deletes the Media Item, returning an error if the vCD call fails. +// Link to API call: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/DELETE-Media.html +func (mediaRecord *MediaRecord) Delete() (Task, error) { + util.Logger.Printf("[TRACE] Deleting media item: %#v", mediaRecord.MediaRecord.Name) + + // Return the task + return mediaRecord.client.ExecuteTaskRequest(mediaRecord.MediaRecord.HREF, http.MethodDelete, + "", "error deleting Media item: %s", nil) +} + +// QueryAllMedia returns all media images found in system using `name` as query. +func (vdc *Vdc) QueryAllMedia(mediaName string) ([]*MediaRecord, error) { + util.Logger.Printf("[TRACE] Querying medias by name\n") + + if mediaName == "" { + return nil, errors.New("media name is empty") + } + + typeMedia := "media" + if vdc.client.IsSysAdmin { + typeMedia = "adminMedia" + } + + results, err := vdc.client.QueryWithNotEncodedParams(nil, map[string]string{"type": typeMedia, + "filter": fmt.Sprintf("name==%s", url.QueryEscape(mediaName))}) + if err != nil { + return nil, fmt.Errorf("error querying medias %s", err) + } + + mediaResults := results.Results.MediaRecord + if vdc.client.IsSysAdmin { + mediaResults = results.Results.AdminMediaRecord + } + + if len(mediaResults) == 0 { + return nil, ErrorEntityNotFound + } + + var newMediaRecords []*MediaRecord + for _, mediaResult := range mediaResults { + newMediaRecord := NewMediaRecord(vdc.client) + newMediaRecord.MediaRecord = mediaResult + newMediaRecords = append(newMediaRecords, newMediaRecord) + } + + util.Logger.Printf("[TRACE] Found media records by name: %#v \n", mediaResults) + return newMediaRecords, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/metadata.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/metadata.go new file mode 100644 index 000000000..7d616ad12 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/metadata.go @@ -0,0 +1,359 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// GetMetadata calls private function getMetadata() with vm.client and vm.VM.HREF +// which returns a *types.Metadata struct for provided VM input. 
+func (vm *VM) GetMetadata() (*types.Metadata, error) { + return getMetadata(vm.client, vm.VM.HREF) +} + +// DeleteMetadata() function calls private function deleteMetadata() with vm.client and vm.VM.HREF +// which deletes metadata depending on key provided as input from VM. +func (vm *VM) DeleteMetadata(key string) (Task, error) { + return deleteMetadata(vm.client, key, vm.VM.HREF) +} + +// AddMetadata calls private function addMetadata() with vm.client and vm.VM.HREF +// which adds metadata key/value pair provided as input to VM. +func (vm *VM) AddMetadata(key string, value string) (Task, error) { + return addMetadata(vm.client, key, value, vm.VM.HREF) +} + +// GetMetadata returns meta data for VDC. +func (vdc *Vdc) GetMetadata() (*types.Metadata, error) { + return getMetadata(vdc.client, getAdminVdcURL(vdc.Vdc.HREF)) +} + +// DeleteMetadata() function deletes metadata by key provided as input +func (vdc *Vdc) DeleteMetadata(key string) (Vdc, error) { + task, err := deleteMetadata(vdc.client, key, getAdminVdcURL(vdc.Vdc.HREF)) + if err != nil { + return Vdc{}, err + } + + err = task.WaitTaskCompletion() + if err != nil { + return Vdc{}, err + } + + err = vdc.Refresh() + if err != nil { + return Vdc{}, err + } + + return *vdc, nil +} + +// AddMetadata adds metadata key/value pair provided as input to VDC. +func (vdc *Vdc) AddMetadata(key string, value string) (Vdc, error) { + task, err := addMetadata(vdc.client, key, value, getAdminVdcURL(vdc.Vdc.HREF)) + if err != nil { + return Vdc{}, err + } + + err = task.WaitTaskCompletion() + if err != nil { + return Vdc{}, err + } + + err = vdc.Refresh() + if err != nil { + return Vdc{}, err + } + + return *vdc, nil +} + +// AddMetadata adds metadata key/value pair provided as input to VDC. +// and returns task +func (vdc *Vdc) AddMetadataAsync(key string, value string) (Task, error) { + return addMetadata(vdc.client, key, value, getAdminVdcURL(vdc.Vdc.HREF)) +} + +// DeleteMetadata() function deletes metadata by key provided as input +// and returns task +func (vdc *Vdc) DeleteMetadataAsync(key string) (Task, error) { + return deleteMetadata(vdc.client, key, getAdminVdcURL(vdc.Vdc.HREF)) +} + +func getAdminVdcURL(vdcURL string) string { + return strings.Split(vdcURL, "/api/vdc/")[0] + "/api/admin/vdc/" + strings.Split(vdcURL, "/api/vdc/")[1] +} + +// GetMetadata calls private function getMetadata() with vapp.client and vapp.VApp.HREF +// which returns a *types.Metadata struct for provided vapp input. +func (vapp *VApp) GetMetadata() (*types.Metadata, error) { + return getMetadata(vapp.client, vapp.VApp.HREF) +} + +func getMetadata(client *Client, requestUri string) (*types.Metadata, error) { + metadata := &types.Metadata{} + + _, err := client.ExecuteRequest(requestUri+"/metadata/", http.MethodGet, + types.MimeMetaData, "error retrieving metadata: %s", nil, metadata) + + return metadata, err +} + +// DeleteMetadata() function calls private function deleteMetadata() with vapp.client and vapp.VApp.HREF +// which deletes metadata depending on key provided as input from vApp. 
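Illustrative aside: a minimal sketch of the metadata flow on a vApp, assuming an already-retrieved `*govcd.VApp`. The key/value pair is hypothetical, and the `MetadataEntry`/`TypedValue` field names used when reading the result back are an assumption based on `types.Metadata`.

```go
package metadataexample // illustrative only; not part of the vendored library

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// tagVApp writes one metadata entry on a vApp, waits for the task, and reads it back.
func tagVApp(vapp *govcd.VApp) error {
	task, err := vapp.AddMetadata("environment", "staging")
	if err != nil {
		return fmt.Errorf("adding metadata: %s", err)
	}
	if err := task.WaitTaskCompletion(); err != nil {
		return fmt.Errorf("waiting for metadata task: %s", err)
	}

	metadata, err := vapp.GetMetadata()
	if err != nil {
		return fmt.Errorf("reading metadata: %s", err)
	}
	// Field names below follow types.Metadata / types.MetadataEntry (assumed here).
	for _, entry := range metadata.MetadataEntry {
		fmt.Printf("%s = %s\n", entry.Key, entry.TypedValue.Value)
	}
	return nil
}
```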
+func (vapp *VApp) DeleteMetadata(key string) (Task, error) { + return deleteMetadata(vapp.client, key, vapp.VApp.HREF) +} + +// Deletes metadata (type MetadataStringValue) from the vApp +// TODO: Support all MetadataTypedValue types with this function +func deleteMetadata(client *Client, key string, requestUri string) (Task, error) { + apiEndpoint := urlParseRequestURI(requestUri) + apiEndpoint.Path += "/metadata/" + key + + // Return the task + return client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodDelete, + "", "error deleting metadata: %s", nil) +} + +// AddMetadata calls private function addMetadata() with vapp.client and vapp.VApp.HREF +// which adds metadata key/value pair provided as input +func (vapp *VApp) AddMetadata(key string, value string) (Task, error) { + return addMetadata(vapp.client, key, value, vapp.VApp.HREF) +} + +// Adds metadata (type MetadataStringValue) to the vApp +// TODO: Support all MetadataTypedValue types with this function +func addMetadata(client *Client, key string, value string, requestUri string) (Task, error) { + newMetadata := &types.MetadataValue{ + Xmlns: types.XMLNamespaceVCloud, + Xsi: types.XMLNamespaceXSI, + TypedValue: &types.TypedValue{ + XsiType: "MetadataStringValue", + Value: value, + }, + } + + apiEndpoint := urlParseRequestURI(requestUri) + apiEndpoint.Path += "/metadata/" + key + + // Return the task + return client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeMetaDataValue, "error adding metadata: %s", newMetadata) +} + +// GetMetadata calls private function getMetadata() with catalogItem.client and catalogItem.CatalogItem.HREF +// which returns a *types.Metadata struct for provided catalog item input. +func (vAppTemplate *VAppTemplate) GetMetadata() (*types.Metadata, error) { + return getMetadata(vAppTemplate.client, vAppTemplate.VAppTemplate.HREF) +} + +// AddMetadata adds metadata key/value pair provided as input and returned update VAppTemplate +func (vAppTemplate *VAppTemplate) AddMetadata(key string, value string) (*VAppTemplate, error) { + task, err := vAppTemplate.AddMetadataAsync(key, value) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("error completing add metadata for vApp template task: %s", err) + } + + err = vAppTemplate.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing vApp template: %s", err) + } + + return vAppTemplate, nil +} + +// AddMetadataAsync calls private function addMetadata() with vAppTemplate.client and vAppTemplate.VAppTemplate.HREF +// which adds metadata key/value pair provided as input. +func (vAppTemplate *VAppTemplate) AddMetadataAsync(key string, value string) (Task, error) { + return addMetadata(vAppTemplate.client, key, value, vAppTemplate.VAppTemplate.HREF) +} + +// DeleteMetadata deletes metadata depending on key provided as input from media item. +func (vAppTemplate *VAppTemplate) DeleteMetadata(key string) error { + task, err := vAppTemplate.DeleteMetadataAsync(key) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error completing delete metadata for vApp template task: %s", err) + } + + return nil +} + +// DeleteMetadataAsync calls private function deleteMetadata() with vAppTemplate.client and vAppTemplate.VAppTemplate.HREF +// which deletes metadata depending on key provided as input from catalog item. 
+func (vAppTemplate *VAppTemplate) DeleteMetadataAsync(key string) (Task, error) { + return deleteMetadata(vAppTemplate.client, key, vAppTemplate.VAppTemplate.HREF) +} + +// GetMetadata calls private function getMetadata() with mediaItem.client and mediaItem.MediaItem.HREF +// which returns a *types.Metadata struct for provided media item input. +// Deprecated: Use MediaRecord.GetMetadata +func (mediaItem *MediaItem) GetMetadata() (*types.Metadata, error) { + return getMetadata(mediaItem.vdc.client, mediaItem.MediaItem.HREF) +} + +// AddMetadata adds metadata key/value pair provided as input. +// Deprecated: Use MediaRecord.AddMetadata +func (mediaItem *MediaItem) AddMetadata(key string, value string) (*MediaItem, error) { + task, err := mediaItem.AddMetadataAsync(key, value) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("error completing add metadata for media item task: %s", err) + } + + err = mediaItem.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing media item: %s", err) + } + + return mediaItem, nil +} + +// AddMetadataAsync calls private function addMetadata() with mediaItem.client and mediaItem.MediaItem.HREF +// which adds metadata key/value pair provided as input. +// Deprecated: Use MediaRecord.AddMetadataAsync +func (mediaItem *MediaItem) AddMetadataAsync(key string, value string) (Task, error) { + return addMetadata(mediaItem.vdc.client, key, value, mediaItem.MediaItem.HREF) +} + +// DeleteMetadata deletes metadata depending on key provided as input from media item. +// Deprecated: Use MediaRecord.DeleteMetadata +func (mediaItem *MediaItem) DeleteMetadata(key string) error { + task, err := mediaItem.DeleteMetadataAsync(key) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error completing delete metadata for media item task: %s", err) + } + + return nil +} + +// DeleteMetadataAsync calls private function deleteMetadata() with mediaItem.client and mediaItem.MediaItem.HREF +// which deletes metadata depending on key provided as input from media item. +// Deprecated: Use MediaRecord.DeleteMetadataAsync +func (mediaItem *MediaItem) DeleteMetadataAsync(key string) (Task, error) { + return deleteMetadata(mediaItem.vdc.client, key, mediaItem.MediaItem.HREF) +} + +// GetMetadata calls private function getMetadata() with MediaRecord.client and MediaRecord.MediaRecord.HREF +// which returns a *types.Metadata struct for provided media item input. +func (mediaRecord *MediaRecord) GetMetadata() (*types.Metadata, error) { + return getMetadata(mediaRecord.client, mediaRecord.MediaRecord.HREF) +} + +// AddMetadata adds metadata key/value pair provided as input. +func (mediaRecord *MediaRecord) AddMetadata(key string, value string) (*MediaRecord, error) { + task, err := mediaRecord.AddMetadataAsync(key, value) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("error completing add metadata for media item task: %s", err) + } + + err = mediaRecord.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing media item: %s", err) + } + + return mediaRecord, nil +} + +// AddMetadataAsync calls private function addMetadata() with MediaRecord.client and MediaRecord.MediaRecord.HREF +// which adds metadata key/value pair provided as input. 
+func (mediaRecord *MediaRecord) AddMetadataAsync(key string, value string) (Task, error) { + return addMetadata(mediaRecord.client, key, value, mediaRecord.MediaRecord.HREF) +} + +// DeleteMetadata deletes metadata depending on key provided as input from media item. +func (mediaRecord *MediaRecord) DeleteMetadata(key string) error { + task, err := mediaRecord.DeleteMetadataAsync(key) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error completing delete metadata for media item task: %s", err) + } + + return nil +} + +// DeleteMetadataAsync calls private function deleteMetadata() with MediaRecord.client and MediaRecord.MediaRecord.HREF +// which deletes metadata depending on key provided as input from media item. +func (mediaRecord *MediaRecord) DeleteMetadataAsync(key string) (Task, error) { + return deleteMetadata(mediaRecord.client, key, mediaRecord.MediaRecord.HREF) +} + +// GetMetadata calls private function getMetadata() with Media.client and Media.Media.HREF +// which returns a *types.Metadata struct for provided media item input. +func (media *Media) GetMetadata() (*types.Metadata, error) { + return getMetadata(media.client, media.Media.HREF) +} + +// AddMetadata adds metadata key/value pair provided as input. +func (media *Media) AddMetadata(key string, value string) (*Media, error) { + task, err := media.AddMetadataAsync(key, value) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("error completing add metadata for media item task: %s", err) + } + + err = media.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing media item: %s", err) + } + + return media, nil +} + +// AddMetadataAsync calls private function addMetadata() with Media.client and Media.Media.HREF +// which adds metadata key/value pair provided as input. +func (media *Media) AddMetadataAsync(key string, value string) (Task, error) { + return addMetadata(media.client, key, value, media.Media.HREF) +} + +// DeleteMetadata deletes metadata depending on key provided as input from media item. +func (media *Media) DeleteMetadata(key string) error { + task, err := media.DeleteMetadataAsync(key) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error completing delete metadata for media item task: %s", err) + } + + return nil +} + +// DeleteMetadataAsync calls private function deleteMetadata() with Media.client and Media.Media.HREF +// which deletes metadata depending on key provided as input from media item. +func (media *Media) DeleteMetadataAsync(key string) (Task, error) { + return deleteMetadata(media.client, key, media.Media.HREF) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/monitor.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/monitor.go new file mode 100644 index 000000000..c20a02688 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/monitor.go @@ -0,0 +1,313 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +// Contains auxiliary functions to show library entities structure. +// Used for debugging and testing. 
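Illustrative aside: a tiny sketch of the `Show*`/`Log*` debugging helpers defined below in this file. It assumes already-retrieved `*govcd.VApp` and `*govcd.Vdc` objects; both helpers take the underlying types struct by value.

```go
package monitorexample // illustrative only; not part of the vendored library

import (
	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// dumpEntities prints a vApp as JSON to stdout and writes a VDC to the util.Logger log,
// using the Show*/Log* helpers defined below.
func dumpEntities(vapp *govcd.VApp, vdc *govcd.Vdc) {
	govcd.ShowVapp(*vapp.VApp) // JSON to the screen
	govcd.LogVdc(*vdc.Vdc)     // JSON to the go-vcloud-director log file
}
```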
+package govcd + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// For each library {entity}, we have two functions: Show{Entity} and Log{Entity} +// The first one shows the contents of the entity on screen +// The second one logs into the default util.Logger +// Both functions use JSON as the entity format + +// Available entities: +// org +// adminOrg +// vdc +// catalog +// catalogItem +// adminCatalog +// network +// externalNetwork +// vapp +// vm +// task +// Edge Gateway service configuration + +func out(destination, format string, args ...interface{}) { + switch destination { + case "screen": + fmt.Printf(format, args...) + case "log": + util.Logger.Printf(format, args...) + default: + fmt.Printf("Unhandled destination: %s\n", destination) + } +} + +// Returns a vApp structure as JSON +func prettyVapp(vapp types.VApp) string { + byteBuf, err := json.MarshalIndent(vapp, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns a VM structure as JSON +func prettyVm(vm types.Vm) string { + byteBuf, err := json.MarshalIndent(vm, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns an OrgUser structure as JSON +func prettyUser(user types.User) string { + byteBuf, err := json.MarshalIndent(user, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns a VDC structure as JSON +func prettyVdc(vdc types.Vdc) string { + byteBuf, err := json.MarshalIndent(vdc, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns a Catalog Item structure as JSON +func prettyCatalogItem(catalogItem types.CatalogItem) string { + byteBuf, err := json.MarshalIndent(catalogItem, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns a Catalog structure as JSON +func prettyCatalog(catalog types.Catalog) string { + byteBuf, err := json.MarshalIndent(catalog, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns an Admin Catalog structure as JSON +func prettyAdminCatalog(catalog types.AdminCatalog) string { + byteBuf, err := json.MarshalIndent(catalog, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns an Org structure as JSON +func prettyOrg(org types.Org) string { + byteBuf, err := json.MarshalIndent(org, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns an Admin Org structure as JSON +func prettyAdminOrg(org types.AdminOrg) string { + byteBuf, err := json.MarshalIndent(org, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns a Disk structure as JSON +func prettyDisk(disk types.Disk) string { + byteBuf, err := json.MarshalIndent(disk, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns an External Network structure as JSON +func prettyExternalNetwork(network types.ExternalNetwork) string { + byteBuf, err := json.MarshalIndent(network, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns a Network structure as JSON +func prettyNetworkConf(conf types.OrgVDCNetwork) string { + byteBuf, err := json.MarshalIndent(conf, " ", " ") + if 
err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns a Task structure as JSON +func prettyTask(task *types.Task) string { + byteBuf, err := json.MarshalIndent(task, " ", " ") + if err == nil { + return fmt.Sprintf("%s\n", string(byteBuf)) + } + return "" +} + +// Returns an Edge Gateway service configuration structure as JSON +//func prettyEdgeGatewayServiceConfiguration(conf types.EdgeGatewayServiceConfiguration) string { +func prettyEdgeGateway(egw types.EdgeGateway) string { + result := "" + byteBuf, err := json.MarshalIndent(egw, " ", " ") + if err == nil { + result += fmt.Sprintf("%s\n", string(byteBuf)) + } + return result +} + +func LogNetwork(conf types.OrgVDCNetwork) { + out("log", prettyNetworkConf(conf)) +} + +func ShowNetwork(conf types.OrgVDCNetwork) { + out("screen", prettyNetworkConf(conf)) +} + +func LogExternalNetwork(network types.ExternalNetwork) { + out("log", prettyExternalNetwork(network)) +} + +func ShowExternalNetwork(network types.ExternalNetwork) { + out("screen", prettyExternalNetwork(network)) +} + +func LogVapp(vapp types.VApp) { + out("log", prettyVapp(vapp)) +} + +func ShowVapp(vapp types.VApp) { + out("screen", prettyVapp(vapp)) +} + +func LogVm(vm types.Vm) { + out("log", prettyVm(vm)) +} + +func ShowVm(vm types.Vm) { + out("screen", prettyVm(vm)) +} +func ShowOrg(org types.Org) { + out("screen", prettyOrg(org)) +} + +func LogOrg(org types.Org) { + out("log", prettyOrg(org)) +} + +func ShowAdminOrg(org types.AdminOrg) { + out("screen", prettyAdminOrg(org)) +} + +func LogAdminOrg(org types.AdminOrg) { + out("log", prettyAdminOrg(org)) +} + +func ShowVdc(vdc types.Vdc) { + out("screen", prettyVdc(vdc)) +} + +func LogVdc(vdc types.Vdc) { + out("log", prettyVdc(vdc)) +} + +func ShowUser(user types.User) { + out("screen", prettyUser(user)) +} + +func LogUser(user types.User) { + out("log", prettyUser(user)) +} + +func ShowDisk(disk types.Disk) { + out("screen", prettyDisk(disk)) +} + +func LogDisk(disk types.Disk) { + out("log", prettyDisk(disk)) +} +func ShowCatalog(catalog types.Catalog) { + out("screen", prettyCatalog(catalog)) +} + +func LogCatalog(catalog types.Catalog) { + out("log", prettyCatalog(catalog)) +} + +func ShowCatalogItem(catalogItem types.CatalogItem) { + out("screen", prettyCatalogItem(catalogItem)) +} + +func LogCatalogItem(catalogItem types.CatalogItem) { + out("log", prettyCatalogItem(catalogItem)) +} + +func ShowAdminCatalog(catalog types.AdminCatalog) { + out("screen", prettyAdminCatalog(catalog)) +} + +func LogAdminCatalog(catalog types.AdminCatalog) { + out("log", prettyAdminCatalog(catalog)) +} + +func LogEdgeGateway(edgeGateway types.EdgeGateway) { + out("log", prettyEdgeGateway(edgeGateway)) +} + +func ShowEdgeGateway(edgeGateway types.EdgeGateway) { + out("screen", prettyEdgeGateway(edgeGateway)) +} + +// Auxiliary function to monitor a task +// It can be used in association with WaitInspectTaskCompletion +func outTask(destination string, task *types.Task, howManyTimes int, elapsed time.Duration, first, last bool) { + if task == nil { + out(destination, "Task is null\n") + return + } + out(destination, prettyTask(task)) + + out(destination, "progress: [%s:%d] %d%%\n", elapsed.Round(1*time.Second), howManyTimes, task.Progress) + out(destination, "-------------------------------\n") +} + +func simpleOutTask(destination string, task *types.Task, howManyTimes int, elapsed time.Duration, first, last bool) { + if task == nil { + out(destination, "Task is null\n") + return + } + out(destination, "%s 
(%s) - elapsed: [%s:%d] - progress: %d%%\n", task.OperationName, task.Status, elapsed.Round(1*time.Second), howManyTimes, task.Progress) +} + +func LogTask(task *types.Task, howManyTimes int, elapsed time.Duration, first, last bool) { + outTask("log", task, howManyTimes, elapsed, first, last) +} + +func ShowTask(task *types.Task, howManyTimes int, elapsed time.Duration, first, last bool) { + outTask("screen", task, howManyTimes, elapsed, first, last) +} + +func SimpleShowTask(task *types.Task, howManyTimes int, elapsed time.Duration, first, last bool) { + simpleOutTask("screen", task, howManyTimes, elapsed, first, last) +} + +func SimpleLogTask(task *types.Task, howManyTimes int, elapsed time.Duration, first, last bool) { + simpleOutTask("log", task, howManyTimes, elapsed, first, last) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_clouds.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_clouds.go new file mode 100644 index 000000000..bd6e8c43e --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_clouds.go @@ -0,0 +1,188 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "errors" + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtAlbCloud helps to use the virtual infrastructure provided by NSX Advanced Load Balancer, register NSX-T Cloud +// instances with VMware Cloud Director by consuming NsxtAlbImportableCloud. +type NsxtAlbCloud struct { + NsxtAlbCloud *types.NsxtAlbCloud + vcdClient *VCDClient +} + +// GetAllAlbClouds returns all configured NSX-T ALB Clouds +func (vcdClient *VCDClient) GetAllAlbClouds(queryParameters url.Values) ([]*NsxtAlbCloud, error) { + client := vcdClient.Client + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Clouds require System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbCloud + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.NsxtAlbCloud{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into NsxtAlbCloud types with client + wrappedResponses := make([]*NsxtAlbCloud, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAlbCloud{ + NsxtAlbCloud: typeResponses[sliceIndex], + vcdClient: vcdClient, + } + } + + return wrappedResponses, nil +} + +// GetAlbCloudByName returns NSX-T ALB Cloud by name +func (vcdClient *VCDClient) GetAlbCloudByName(name string) (*NsxtAlbCloud, error) { + queryParameters := copyOrNewUrlValues(nil) + queryParameters.Add("filter", "name=="+name) + + albClouds, err := vcdClient.GetAllAlbClouds(queryParameters) + if err != nil { + return nil, fmt.Errorf("error reading NSX-T ALB Cloud with Name '%s': %s", name, err) + } + + if len(albClouds) == 0 { + return nil, fmt.Errorf("%s could not find NSX-T ALB Cloud with Name '%s'", ErrorEntityNotFound, name) + } + + if len(albClouds) > 1 { + return nil, fmt.Errorf("found more than 1 NSX-T ALB Cloud with Name '%s'", name) + } + + return albClouds[0], nil +} + +// GetAlbCloudById returns NSX-T ALB Cloud by ID +// +// 
Note. This function uses server side filtering instead of directly querying endpoint with specified ID because such +// endpoint does not exist +func (vcdClient *VCDClient) GetAlbCloudById(id string) (*NsxtAlbCloud, error) { + + queryParameters := copyOrNewUrlValues(nil) + queryParameters.Add("filter", "id=="+id) + + albCloud, err := vcdClient.GetAllAlbClouds(queryParameters) + if err != nil { + return nil, fmt.Errorf("error reading NSX-T ALB Cloud with ID '%s': %s", id, err) + } + + if len(albCloud) == 0 { + return nil, fmt.Errorf("%s could not find NSX-T ALB Cloud by ID '%s'", ErrorEntityNotFound, id) + } + + return albCloud[0], nil +} + +// CreateAlbCloud creates NSX-T ALB Cloud +func (vcdClient *VCDClient) CreateAlbCloud(albCloudConfig *types.NsxtAlbCloud) (*NsxtAlbCloud, error) { + client := vcdClient.Client + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Clouds require System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbCloud + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnObject := &NsxtAlbCloud{ + NsxtAlbCloud: &types.NsxtAlbCloud{}, + vcdClient: vcdClient, + } + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, albCloudConfig, returnObject.NsxtAlbCloud, nil) + if err != nil { + return nil, fmt.Errorf("error creating NSX-T ALB Cloud: %s", err) + } + + return returnObject, nil +} + +// Update is not supported in VCD 10.3 and older therefore this function remains commented +// +// Update updates existing NSX-T ALB Cloud with new supplied albCloudConfig configuration +//func (nsxtAlbCloud *NsxtAlbCloud) Update(albCloudConfig *types.NsxtAlbCloud) (*NsxtAlbCloud, error) { +// client := nsxtAlbCloud.vcdClient.Client +// endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbCloud +// minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) +// if err != nil { +// return nil, err +// } +// +// if albCloudConfig.ID == "" { +// return nil, fmt.Errorf("cannot update NSX-T ALB Cloud without ID") +// } +// +// urlRef, err := client.OpenApiBuildEndpoint(endpoint, albCloudConfig.ID) +// if err != nil { +// return nil, err +// } +// +// responseAlbCloud := &NsxtAlbCloud{ +// NsxtAlbCloud: &types.NsxtAlbCloud{}, +// vcdClient: nsxtAlbCloud.vcdClient, +// } +// +// err = client.OpenApiPutItem(minimumApiVersion, urlRef, nil, albCloudConfig, responseAlbCloud.NsxtAlbCloud, nil) +// if err != nil { +// return nil, fmt.Errorf("error updating NSX-T ALB Cloud: %s", err) +// } +// +// return responseAlbCloud, nil +//} + +// Delete removes NSX-T ALB Cloud configuration +func (nsxtAlbCloud *NsxtAlbCloud) Delete() error { + client := nsxtAlbCloud.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbCloud + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if nsxtAlbCloud.NsxtAlbCloud.ID == "" { + return fmt.Errorf("cannot delete NSX-T ALB Cloud without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, nsxtAlbCloud.NsxtAlbCloud.ID) + if err != nil { + return err + } + + err = client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + if err != nil { + return fmt.Errorf("error deleting NSX-T ALB Cloud: %s", err) + } + + return nil +} diff --git 
a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_controllers.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_controllers.go new file mode 100644 index 000000000..1cd144c98 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_controllers.go @@ -0,0 +1,232 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "errors" + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtAlbController helps to integrate VMware Cloud Director with NSX-T Advanced Load Balancer deployment. +// Controller instances are registered with VMware Cloud Director instance. Controller instances serve as a central +// control plane for the load-balancing services provided by NSX-T Advanced Load Balancer. +// To configure an NSX-T ALB one needs to supply AVI Controller endpoint, credentials and license to be used. +type NsxtAlbController struct { + NsxtAlbController *types.NsxtAlbController + vcdClient *VCDClient +} + +// GetAllAlbControllers returns all configured NSX-T ALB Controllers +func (vcdClient *VCDClient) GetAllAlbControllers(queryParameters url.Values) ([]*NsxtAlbController, error) { + client := vcdClient.Client + if !client.IsSysAdmin { + return nil, errors.New("reading NSX-T ALB Controllers require System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbController + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.NsxtAlbController{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into NsxtAlbController types with client + wrappedResponses := make([]*NsxtAlbController, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAlbController{ + NsxtAlbController: typeResponses[sliceIndex], + vcdClient: vcdClient, + } + } + + return wrappedResponses, nil +} + +// GetAlbControllerByName returns NSX-T ALB Controller by Name +func (vcdClient *VCDClient) GetAlbControllerByName(name string) (*NsxtAlbController, error) { + queryParameters := copyOrNewUrlValues(nil) + queryParameters.Add("filter", "name=="+name) + + controllers, err := vcdClient.GetAllAlbControllers(queryParameters) + if err != nil { + return nil, fmt.Errorf("error reading ALB Controller with Name '%s': %s", name, err) + } + + if len(controllers) == 0 { + return nil, fmt.Errorf("%s: could not find ALB Controller with Name '%s'", ErrorEntityNotFound, name) + } + + if len(controllers) > 1 { + return nil, fmt.Errorf("found more than 1 ALB Controller with Name '%s'", name) + } + + return controllers[0], nil +} + +// GetAlbControllerById returns NSX-T ALB Controller by ID +func (vcdClient *VCDClient) GetAlbControllerById(id string) (*NsxtAlbController, error) { + client := vcdClient.Client + if !client.IsSysAdmin { + return nil, errors.New("reading NSX-T ALB Controllers require System user") + } + + if id == "" { + return nil, fmt.Errorf("ID is required to lookup NSX-T ALB Controller by ID") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbController + apiVersion, err := 
client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + typeResponse := &types.NsxtAlbController{} + err = client.OpenApiGetItem(apiVersion, urlRef, nil, &typeResponse, nil) + if err != nil { + return nil, err + } + + wrappedResponse := &NsxtAlbController{ + NsxtAlbController: typeResponse, + vcdClient: vcdClient, + } + + return wrappedResponse, nil +} + +// GetAlbControllerByUrl returns configured ALB Controller by URL +// +// Note. Filtering is performed on client side. +func (vcdClient *VCDClient) GetAlbControllerByUrl(url string) (*NsxtAlbController, error) { + // Ideally this function could filter on VCD side, but API does not support filtering on URL + controllers, err := vcdClient.GetAllAlbControllers(nil) + if err != nil { + return nil, fmt.Errorf("error reading ALB Controller with Url '%s': %s", url, err) + } + + // Search for controllers + filteredControllers := make([]*NsxtAlbController, 0) + for _, controller := range controllers { + if controller.NsxtAlbController.Url == url { + filteredControllers = append(filteredControllers, controller) + } + } + + if len(filteredControllers) == 0 { + return nil, fmt.Errorf("%s could not find ALB Controller by Url '%s'", ErrorEntityNotFound, url) + } + + if len(filteredControllers) > 1 { + return nil, fmt.Errorf("found more than 1 ALB Controller by Url '%s'", url) + } + + return filteredControllers[0], nil +} + +// CreateNsxtAlbController creates controller with supplied albControllerConfig configuration +func (vcdClient *VCDClient) CreateNsxtAlbController(albControllerConfig *types.NsxtAlbController) (*NsxtAlbController, error) { + client := vcdClient.Client + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Controllers require System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbController + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnObject := &NsxtAlbController{ + NsxtAlbController: &types.NsxtAlbController{}, + vcdClient: vcdClient, + } + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, albControllerConfig, returnObject.NsxtAlbController, nil) + if err != nil { + return nil, fmt.Errorf("error creating NSX-T ALB Controller: %s", err) + } + + return returnObject, nil +} + +// Update updates existing NSX-T ALB Controller with new supplied albControllerConfig configuration +func (nsxtAlbController *NsxtAlbController) Update(albControllerConfig *types.NsxtAlbController) (*NsxtAlbController, error) { + client := nsxtAlbController.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbController + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if albControllerConfig.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T ALB Controller without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, albControllerConfig.ID) + if err != nil { + return nil, err + } + + responseAlbController := &NsxtAlbController{ + NsxtAlbController: &types.NsxtAlbController{}, + vcdClient: nsxtAlbController.vcdClient, + } + + err = client.OpenApiPutItem(minimumApiVersion, urlRef, nil, albControllerConfig, responseAlbController.NsxtAlbController, nil) + if err != 
nil { + return nil, fmt.Errorf("error updating NSX-T ALB Controller: %s", err) + } + + return responseAlbController, nil +} + +// Delete deletes existing NSX-T ALB Controller +func (nsxtAlbController *NsxtAlbController) Delete() error { + client := nsxtAlbController.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbController + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if nsxtAlbController.NsxtAlbController.ID == "" { + return fmt.Errorf("cannot delete NSX-T ALB Controller without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, nsxtAlbController.NsxtAlbController.ID) + if err != nil { + return err + } + + err = client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + if err != nil { + return fmt.Errorf("error deleting NSX-T ALB Controller: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_importable_clouds.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_importable_clouds.go new file mode 100644 index 000000000..d239526d6 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_importable_clouds.go @@ -0,0 +1,121 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "errors" + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtAlbImportableCloud allows user to list importable NSX-T ALB Clouds. Each importable cloud can only be imported +// once by using NsxtAlbCloud construct. It has a flag AlreadyImported which hints if it is already consumed or not. +type NsxtAlbImportableCloud struct { + NsxtAlbImportableCloud *types.NsxtAlbImportableCloud + vcdClient *VCDClient +} + +// GetAllAlbImportableClouds returns importable NSX-T ALB Clouds. +// parentAlbControllerUrn (ID in URN format of a parent ALB Controller) is mandatory +func (vcdClient *VCDClient) GetAllAlbImportableClouds(parentAlbControllerUrn string, queryParameters url.Values) ([]*NsxtAlbImportableCloud, error) { + client := vcdClient.Client + if parentAlbControllerUrn == "" { + return nil, fmt.Errorf("parent ALB Controller ID is required") + } + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Importable Clouds require System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbImportableClouds + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + queryParams := copyOrNewUrlValues(queryParameters) + queryParams = queryParameterFilterAnd(fmt.Sprintf("_context==%s", parentAlbControllerUrn), queryParams) + typeResponses := []*types.NsxtAlbImportableCloud{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParams, &typeResponses, nil) + if err != nil { + return nil, err + } + + wrappedResponses := make([]*NsxtAlbImportableCloud, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAlbImportableCloud{ + NsxtAlbImportableCloud: typeResponses[sliceIndex], + vcdClient: vcdClient, + } + } + + return wrappedResponses, nil +} + +// GetAlbImportableCloudByName returns importable NSX-T ALB Clouds. 
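Importable Clouds are the bridge between a registered ALB Controller and an `NsxtAlbCloud`: each one can be consumed only once, and the `AlreadyImported` flag hints whether it is still available. A minimal lookup sketch using the controller-scoped helper defined further below in this file, assuming an authenticated System client and placeholder controller/cloud names (the function name in the sketch is illustrative):

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// findImportableCloud looks up an ALB Controller by name and returns the ID of an
// importable cloud that has not been consumed yet. controllerName and cloudName are
// placeholders supplied by the caller.
func findImportableCloud(vcdClient *govcd.VCDClient, controllerName, cloudName string) (string, error) {
	controller, err := vcdClient.GetAlbControllerByName(controllerName)
	if err != nil {
		return "", fmt.Errorf("error finding ALB Controller '%s': %s", controllerName, err)
	}

	importableCloud, err := controller.GetAlbImportableCloudByName(cloudName)
	if err != nil {
		return "", err
	}

	// AlreadyImported is described in the type's doc comment as a hint that the cloud
	// was already consumed by an existing NsxtAlbCloud
	if importableCloud.NsxtAlbImportableCloud.AlreadyImported {
		return "", fmt.Errorf("importable cloud '%s' is already consumed", cloudName)
	}

	return importableCloud.NsxtAlbImportableCloud.ID, nil
}
```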
+func (vcdClient *VCDClient) GetAlbImportableCloudByName(parentAlbControllerUrn, name string) (*NsxtAlbImportableCloud, error) { + albImportableClouds, err := vcdClient.GetAllAlbImportableClouds(parentAlbControllerUrn, nil) + if err != nil { + return nil, fmt.Errorf("error finding NSX-T ALB Importable Cloud by Name '%s': %s", name, err) + } + + // Filtering by Name is not supported by API therefore it must be filtered on client side + var foundResult bool + var foundAlbImportableCloud *NsxtAlbImportableCloud + for i, value := range albImportableClouds { + if albImportableClouds[i].NsxtAlbImportableCloud.DisplayName == name { + foundResult = true + foundAlbImportableCloud = value + break + } + } + + if !foundResult { + return nil, fmt.Errorf("%s: could not find NSX-T ALB Importable Cloud by Name %s", ErrorEntityNotFound, name) + } + + return foundAlbImportableCloud, nil +} + +// GetAlbImportableCloudById returns importable NSX-T ALB Clouds. +// Note. ID filtering is performed on client side +func (vcdClient *VCDClient) GetAlbImportableCloudById(parentAlbControllerUrn, id string) (*NsxtAlbImportableCloud, error) { + albImportableClouds, err := vcdClient.GetAllAlbImportableClouds(parentAlbControllerUrn, nil) + if err != nil { + return nil, fmt.Errorf("error finding NSX-T ALB Importable Cloud by ID '%s': %s", id, err) + } + + // Filtering by ID is not supported by API therefore it must be filtered on client side + var foundResult bool + var foundAlbImportableCloud *NsxtAlbImportableCloud + for i, value := range albImportableClouds { + if albImportableClouds[i].NsxtAlbImportableCloud.ID == id { + foundResult = true + foundAlbImportableCloud = value + } + } + + if !foundResult { + return nil, fmt.Errorf("%s: could not find NSX-T ALB Importable Cloud by ID %s", ErrorEntityNotFound, id) + } + + return foundAlbImportableCloud, nil +} + +// GetAllAlbImportableClouds is attached to NsxtAlbController type for a convenient parent/child relationship +func (nsxtAlbController *NsxtAlbController) GetAllAlbImportableClouds(queryParameters url.Values) ([]*NsxtAlbImportableCloud, error) { + return nsxtAlbController.vcdClient.GetAllAlbImportableClouds(nsxtAlbController.NsxtAlbController.ID, queryParameters) +} + +// GetAlbImportableCloudByName is attached to NsxtAlbController type for a convenient parent/child relationship +func (nsxtAlbController *NsxtAlbController) GetAlbImportableCloudByName(name string) (*NsxtAlbImportableCloud, error) { + return nsxtAlbController.vcdClient.GetAlbImportableCloudByName(nsxtAlbController.NsxtAlbController.ID, name) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_importable_service_engine_groups.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_importable_service_engine_groups.go new file mode 100644 index 000000000..75e79f7c5 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_importable_service_engine_groups.go @@ -0,0 +1,202 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "errors" + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtAlbImportableServiceEngineGroups provides capability to list all Importable Service Engine Groups available in +// ALB Controller so that they can be consumed by NsxtAlbServiceEngineGroup +// +// Note. 
The API does not return Importable Service Engine Group once it is consumed. +type NsxtAlbImportableServiceEngineGroups struct { + NsxtAlbImportableServiceEngineGroups *types.NsxtAlbImportableServiceEngineGroups + vcdClient *VCDClient +} + +// GetAllAlbImportableServiceEngineGroups lists all Importable Service Engine Groups available in ALB Controller +func (vcdClient *VCDClient) GetAllAlbImportableServiceEngineGroups(parentAlbCloudUrn string, queryParameters url.Values) ([]*NsxtAlbImportableServiceEngineGroups, error) { + client := vcdClient.Client + if parentAlbCloudUrn == "" { + return nil, fmt.Errorf("parentAlbCloudUrn is required") + } + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Importable Service Engine Groups requires System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbImportableServiceEngineGroups + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + queryParams := copyOrNewUrlValues(queryParameters) + queryParams = queryParameterFilterAnd(fmt.Sprintf("_context==%s", parentAlbCloudUrn), queryParams) + typeResponses := []*types.NsxtAlbImportableServiceEngineGroups{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParams, &typeResponses, nil) + if err != nil { + return nil, err + } + + wrappedResponses := make([]*NsxtAlbImportableServiceEngineGroups, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAlbImportableServiceEngineGroups{ + NsxtAlbImportableServiceEngineGroups: typeResponses[sliceIndex], + vcdClient: vcdClient, + } + } + + return wrappedResponses, nil +} + +// GetAlbImportableServiceEngineGroupByName returns importable NSX-T ALB Clouds. +func (vcdClient *VCDClient) GetAlbImportableServiceEngineGroupByName(parentAlbCloudUrn, name string) (*NsxtAlbImportableServiceEngineGroups, error) { + albClouds, err := vcdClient.GetAllAlbImportableServiceEngineGroups(parentAlbCloudUrn, nil) + if err != nil { + return nil, fmt.Errorf("error finding NSX-T ALB Importable Service Engine Group by Name '%s': %s", name, err) + } + + // Filtering by Name is not supported by API therefore it must be filtered on client side + var foundResult bool + var foundAlbCloud *NsxtAlbImportableServiceEngineGroups + for i, value := range albClouds { + if albClouds[i].NsxtAlbImportableServiceEngineGroups.DisplayName == name { + foundResult = true + foundAlbCloud = value + break + } + } + + if !foundResult { + return nil, fmt.Errorf("%s: could not find NSX-T ALB Importable Service Engine Group by Name %s", ErrorEntityNotFound, name) + } + + return foundAlbCloud, nil +} + +// GetAlbImportableServiceEngineGroupById +// Note. 
ID filtering is performed on client side +func (vcdClient *VCDClient) GetAlbImportableServiceEngineGroupById(parentAlbCloudUrn, id string) (*NsxtAlbImportableServiceEngineGroups, error) { + albClouds, err := vcdClient.GetAllAlbImportableServiceEngineGroups(parentAlbCloudUrn, nil) + if err != nil { + return nil, fmt.Errorf("error finding NSX-T ALB Importable Service Engine Group by ID '%s': %s", id, err) + } + + // Filtering by ID is not supported by API therefore it must be filtered on client side + var foundResult bool + var foundImportableSEGroups *NsxtAlbImportableServiceEngineGroups + for i, value := range albClouds { + if albClouds[i].NsxtAlbImportableServiceEngineGroups.ID == id { + foundResult = true + foundImportableSEGroups = value + } + } + + if !foundResult { + return nil, fmt.Errorf("%s: could not find NSX-T ALB Importable Service Engine Group by ID %s", ErrorEntityNotFound, id) + } + + return foundImportableSEGroups, nil +} + +// GetAllAlbImportableServiceEngineGroups lists all Importable Service Engine Groups available in ALB Controller +func (nsxtAlbCloud *NsxtAlbCloud) GetAllAlbImportableServiceEngineGroups(parentAlbCloudUrn string, queryParameters url.Values) ([]*NsxtAlbImportableServiceEngineGroups, error) { + client := nsxtAlbCloud.vcdClient.Client + if parentAlbCloudUrn == "" { + return nil, fmt.Errorf("parentAlbCloudUrn is required") + } + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Importable Service Engine Groups requires System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbImportableServiceEngineGroups + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + queryParams := copyOrNewUrlValues(queryParameters) + queryParams = queryParameterFilterAnd(fmt.Sprintf("_context==%s", parentAlbCloudUrn), queryParams) + typeResponses := []*types.NsxtAlbImportableServiceEngineGroups{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParams, &typeResponses, nil) + if err != nil { + return nil, err + } + + wrappedResponses := make([]*NsxtAlbImportableServiceEngineGroups, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAlbImportableServiceEngineGroups{ + NsxtAlbImportableServiceEngineGroups: typeResponses[sliceIndex], + vcdClient: nsxtAlbCloud.vcdClient, + } + } + + return wrappedResponses, nil +} + +// GetAlbImportableServiceEngineGroupByName returns importable NSX-T ALB Clouds. 
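The same client-side matching applies to Importable Service Engine Groups, which are scoped by a parent ALB Cloud URN. A short sketch, assuming the cloud's own ID can be passed as that URN and that the session belongs to a System administrator (the helper name is illustrative):

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// findImportableSeGroup matches an Importable Service Engine Group by display name within
// one registered ALB Cloud. The API offers no name filter, so matching happens client side
// inside the library call; albCloud is assumed to be an already retrieved NSX-T ALB Cloud.
func findImportableSeGroup(vcdClient *govcd.VCDClient, albCloud *govcd.NsxtAlbCloud, name string) (string, error) {
	seGroup, err := vcdClient.GetAlbImportableServiceEngineGroupByName(albCloud.NsxtAlbCloud.ID, name)
	if err != nil {
		return "", fmt.Errorf("error finding Importable Service Engine Group '%s': %s", name, err)
	}
	return seGroup.NsxtAlbImportableServiceEngineGroups.ID, nil
}
```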
+func (nsxtAlbCloud *NsxtAlbCloud) GetAlbImportableServiceEngineGroupByName(parentAlbCloudUrn, name string) (*NsxtAlbImportableServiceEngineGroups, error) { + albClouds, err := nsxtAlbCloud.vcdClient.GetAllAlbImportableServiceEngineGroups(parentAlbCloudUrn, nil) + if err != nil { + return nil, fmt.Errorf("error finding NSX-T ALB Importable Service Engine Group by Name '%s': %s", name, err) + } + + // Filtering by ID is not supported by API therefore it must be filtered on client side + var foundResult bool + var foundAlbCloud *NsxtAlbImportableServiceEngineGroups + for i, value := range albClouds { + if albClouds[i].NsxtAlbImportableServiceEngineGroups.DisplayName == name { + foundResult = true + foundAlbCloud = value + break + } + } + + if !foundResult { + return nil, fmt.Errorf("%s: could not find NSX-T ALB Importable Service Engine Group by Name %s", ErrorEntityNotFound, name) + } + + return foundAlbCloud, nil +} + +// GetAlbImportableServiceEngineGroupById +// Note. ID filtering is performed on client side +func (nsxtAlbCloud *NsxtAlbCloud) GetAlbImportableServiceEngineGroupById(parentAlbCloudUrn, id string) (*NsxtAlbImportableServiceEngineGroups, error) { + albClouds, err := nsxtAlbCloud.vcdClient.GetAllAlbImportableServiceEngineGroups(parentAlbCloudUrn, nil) + if err != nil { + return nil, fmt.Errorf("error finding NSX-T ALB Importable Service Engine Group by ID '%s': %s", id, err) + } + + // Filtering by ID is not supported by API therefore it must be filtered on client side + var foundResult bool + var foundImportableSEGroups *NsxtAlbImportableServiceEngineGroups + for i, value := range albClouds { + if albClouds[i].NsxtAlbImportableServiceEngineGroups.ID == id { + foundResult = true + foundImportableSEGroups = value + } + } + + if !foundResult { + return nil, fmt.Errorf("%s: could not find NSX-T ALB Importable Service Engine Group by ID %s", ErrorEntityNotFound, id) + } + + return foundImportableSEGroups, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_pool.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_pool.go new file mode 100644 index 000000000..e592e07db --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_pool.go @@ -0,0 +1,215 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtAlbPool defines configuration of a single NSX-T ALB Pool. Pools maintain the list of servers assigned to them and +// perform health monitoring, load balancing, persistence. A pool may only be used or referenced by only one virtual +// service at a time. 
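For ALB Pools, the summary endpoint is the only way to list everything attached to an Edge Gateway in one call; complete pool objects are then fetched individually. A sketch of that pattern using the functions defined below, with `edgeGatewayId` and `poolName` as caller-supplied placeholders and the function name being illustrative:

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// listAlbPools prints ALB Pool IDs attached to a given Edge Gateway using the cheap
// summary endpoint, then retrieves one complete pool by name.
func listAlbPools(vcdClient *govcd.VCDClient, edgeGatewayId, poolName string) error {
	summaries, err := vcdClient.GetAllAlbPoolSummaries(edgeGatewayId, nil)
	if err != nil {
		return fmt.Errorf("error listing ALB Pool summaries: %s", err)
	}
	for _, pool := range summaries {
		fmt.Println(pool.NsxtAlbPool.ID)
	}

	// Lookup by name walks the summaries internally and then fetches the full pool by ID,
	// so it costs an additional API call per match
	fullPool, err := vcdClient.GetAlbPoolByName(edgeGatewayId, poolName)
	if err != nil {
		return fmt.Errorf("error retrieving ALB Pool '%s': %s", poolName, err)
	}
	fmt.Printf("found pool %s\n", fullPool.NsxtAlbPool.ID)
	return nil
}
```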
+type NsxtAlbPool struct { + NsxtAlbPool *types.NsxtAlbPool + vcdClient *VCDClient +} + +// GetAllAlbPoolSummaries retrieves partial information for type `NsxtAlbPool`, but it is the only way to retrieve all ALB +// pools for Edge Gateway +func (vcdClient *VCDClient) GetAllAlbPoolSummaries(edgeGatewayId string, queryParameters url.Values) ([]*NsxtAlbPool, error) { + client := vcdClient.Client + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbPoolSummaries + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, edgeGatewayId)) + if err != nil { + return nil, err + } + + typeResponses := []*types.NsxtAlbPool{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into NsxtAlbPool types with client + wrappedResponses := make([]*NsxtAlbPool, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAlbPool{ + NsxtAlbPool: typeResponses[sliceIndex], + vcdClient: vcdClient, + } + } + + return wrappedResponses, nil +} + +// GetAllAlbPools uses GetAllAlbPoolSummaries behind the scenes and the fetches complete data for all ALB Pools. This +// has performance penalty because each ALB Pool is fetched individually. +func (vcdClient *VCDClient) GetAllAlbPools(edgeGatewayId string, queryParameters url.Values) ([]*NsxtAlbPool, error) { + allAlbPoolSummaries, err := vcdClient.GetAllAlbPoolSummaries(edgeGatewayId, queryParameters) + if err != nil { + return nil, fmt.Errorf("error retrieving all ALB Pool summaries: %s", err) + } + + // Loop over all Summaries and retrieve complete information + allAlbPools := make([]*NsxtAlbPool, len(allAlbPoolSummaries)) + for index := range allAlbPoolSummaries { + + allAlbPools[index], err = vcdClient.GetAlbPoolById(allAlbPoolSummaries[index].NsxtAlbPool.ID) + if err != nil { + return nil, fmt.Errorf("error retrieving complete ALB Pool: %s", err) + } + + } + + return allAlbPools, nil +} + +// GetAlbPoolByName fetches ALB Pool By Name +func (vcdClient *VCDClient) GetAlbPoolByName(edgeGatewayId string, name string) (*NsxtAlbPool, error) { + queryParameters := copyOrNewUrlValues(nil) + queryParameters.Add("filter", "name=="+name) + + allAlbPools, err := vcdClient.GetAllAlbPools(edgeGatewayId, queryParameters) + if err != nil { + return nil, fmt.Errorf("error retrieving ALB Pool with Name '%s': %s", name, err) + } + + if len(allAlbPools) == 0 { + return nil, fmt.Errorf("%s: could not find ALB Pool with Name '%s'", ErrorEntityNotFound, name) + } + + if len(allAlbPools) > 1 { + return nil, fmt.Errorf("found more than 1 ALB Pool with Name '%s'", name) + } + + return allAlbPools[0], nil +} + +// GetAlbPoolById fetches ALB Pool By Id +func (vcdClient *VCDClient) GetAlbPoolById(id string) (*NsxtAlbPool, error) { + client := vcdClient.Client + + if id == "" { + return nil, fmt.Errorf("ID is required to lookup NSX-T ALB Pool by ID") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbPools + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + typeResponse := &types.NsxtAlbPool{} + err = client.OpenApiGetItem(apiVersion, urlRef, nil, &typeResponse, nil) + if err != nil { + return nil, err + } + + 
wrappedResponse := &NsxtAlbPool{ + NsxtAlbPool: typeResponse, + vcdClient: vcdClient, + } + + return wrappedResponse, nil +} + +// CreateNsxtAlbPool creates NSX-T ALB Pool based on supplied configuration +func (vcdClient *VCDClient) CreateNsxtAlbPool(albPoolConfig *types.NsxtAlbPool) (*NsxtAlbPool, error) { + client := vcdClient.Client + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbPools + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnObject := &NsxtAlbPool{ + NsxtAlbPool: &types.NsxtAlbPool{}, + vcdClient: vcdClient, + } + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, albPoolConfig, returnObject.NsxtAlbPool, nil) + if err != nil { + return nil, fmt.Errorf("error creating NSX-T ALB Pool: %s", err) + } + + return returnObject, nil +} + +// Update updates NSX-T ALB Pool based on supplied configuration +func (nsxtAlbPool *NsxtAlbPool) Update(albPoolConfig *types.NsxtAlbPool) (*NsxtAlbPool, error) { + client := nsxtAlbPool.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbPools + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if albPoolConfig.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T ALB Pool without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, albPoolConfig.ID) + if err != nil { + return nil, err + } + + responseAlbController := &NsxtAlbPool{ + NsxtAlbPool: &types.NsxtAlbPool{}, + vcdClient: nsxtAlbPool.vcdClient, + } + + err = client.OpenApiPutItem(minimumApiVersion, urlRef, nil, albPoolConfig, responseAlbController.NsxtAlbPool, nil) + if err != nil { + return nil, fmt.Errorf("error updating NSX-T ALB Pool: %s", err) + } + + return responseAlbController, nil +} + +// Delete deletes NSX-T ALB Pool +func (nsxtAlbPool *NsxtAlbPool) Delete() error { + client := nsxtAlbPool.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbPools + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if nsxtAlbPool.NsxtAlbPool.ID == "" { + return fmt.Errorf("cannot delete NSX-T ALB Pool without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, nsxtAlbPool.NsxtAlbPool.ID) + if err != nil { + return err + } + + err = client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + if err != nil { + return fmt.Errorf("error deleting NSX-T ALB Pool: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_service_engine_group_assignment.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_service_engine_group_assignment.go new file mode 100644 index 000000000..f9b84a535 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_service_engine_group_assignment.go @@ -0,0 +1,213 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "errors" + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtAlbServiceEngineGroupAssignment handles Service Engine Group Assignment to NSX-T Edge Gateways +type NsxtAlbServiceEngineGroupAssignment struct { + NsxtAlbServiceEngineGroupAssignment *types.NsxtAlbServiceEngineGroupAssignment + vcdClient *VCDClient +} + +func (vcdClient *VCDClient) GetAllAlbServiceEngineGroupAssignments(queryParameters url.Values) ([]*NsxtAlbServiceEngineGroupAssignment, error) { + client := vcdClient.Client + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroupAssignments + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.NsxtAlbServiceEngineGroupAssignment{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + wrappedResponses := make([]*NsxtAlbServiceEngineGroupAssignment, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAlbServiceEngineGroupAssignment{ + NsxtAlbServiceEngineGroupAssignment: typeResponses[sliceIndex], + vcdClient: vcdClient, + } + } + + return wrappedResponses, nil +} + +func (vcdClient *VCDClient) GetAlbServiceEngineGroupAssignmentById(id string) (*NsxtAlbServiceEngineGroupAssignment, error) { + client := vcdClient.Client + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroupAssignments + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + typeResponse := &types.NsxtAlbServiceEngineGroupAssignment{} + + err = client.OpenApiGetItem(apiVersion, urlRef, nil, &typeResponse, nil) + if err != nil { + return nil, err + } + + wrappedResponse := &NsxtAlbServiceEngineGroupAssignment{ + NsxtAlbServiceEngineGroupAssignment: typeResponse, + vcdClient: vcdClient, + } + + return wrappedResponse, nil +} + +func (vcdClient *VCDClient) GetAlbServiceEngineGroupAssignmentByName(name string) (*NsxtAlbServiceEngineGroupAssignment, error) { + // Filtering by Service Engine Group name is not supported on API therefore filtering is done locally + allServiceEngineGroupAssignments, err := vcdClient.GetAllAlbServiceEngineGroupAssignments(nil) + if err != nil { + return nil, err + } + + var foundGroup *NsxtAlbServiceEngineGroupAssignment + + for _, serviceEngineGroupAssignment := range allServiceEngineGroupAssignments { + if serviceEngineGroupAssignment.NsxtAlbServiceEngineGroupAssignment.ServiceEngineGroupRef.Name == name { + foundGroup = serviceEngineGroupAssignment + } + } + + if foundGroup == nil { + return nil, ErrorEntityNotFound + } + + return foundGroup, nil +} + +// GetFilteredAlbServiceEngineGroupAssignmentByName will get all ALB Service Engine Group assignments based on filters +// provided in queryParameters additionally will filter by name locally because VCD does not support server side +// filtering by name. 
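A sketch of how the filtered lookup defined directly below might be combined with a gateway filter so only one Edge Gateway's assignments are considered. The `gatewayRef.id` FIQL field is an assumption about what this endpoint accepts, not something confirmed by this file, and the helper name is illustrative:

```go
package example

import (
	"fmt"
	"net/url"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// findSeGroupAssignment narrows Service Engine Group assignments to a single Edge Gateway
// via a query filter and then lets the library match the group name locally.
func findSeGroupAssignment(vcdClient *govcd.VCDClient, edgeGatewayId, seGroupName string) (*govcd.NsxtAlbServiceEngineGroupAssignment, error) {
	queryParams := url.Values{}
	queryParams.Add("filter", "gatewayRef.id=="+edgeGatewayId) // assumed filter field

	assignment, err := vcdClient.GetFilteredAlbServiceEngineGroupAssignmentByName(seGroupName, queryParams)
	if err != nil {
		return nil, fmt.Errorf("error finding Service Engine Group assignment '%s': %s", seGroupName, err)
	}
	return assignment, nil
}
```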
+func (vcdClient *VCDClient) GetFilteredAlbServiceEngineGroupAssignmentByName(name string, queryParameters url.Values) (*NsxtAlbServiceEngineGroupAssignment, error) { + // Filtering by Service Engine Group name is not supported on API therefore filtering is done locally + allServiceEngineGroupAssignments, err := vcdClient.GetAllAlbServiceEngineGroupAssignments(queryParameters) + if err != nil { + return nil, err + } + + var foundGroup *NsxtAlbServiceEngineGroupAssignment + + for _, serviceEngineGroupAssignment := range allServiceEngineGroupAssignments { + if serviceEngineGroupAssignment.NsxtAlbServiceEngineGroupAssignment.ServiceEngineGroupRef.Name == name { + foundGroup = serviceEngineGroupAssignment + } + } + + if foundGroup == nil { + return nil, ErrorEntityNotFound + } + + return foundGroup, nil +} + +func (vcdClient *VCDClient) CreateAlbServiceEngineGroupAssignment(assignmentConfig *types.NsxtAlbServiceEngineGroupAssignment) (*NsxtAlbServiceEngineGroupAssignment, error) { + client := vcdClient.Client + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Service Engine Group Assignment require System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroupAssignments + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnObject := &NsxtAlbServiceEngineGroupAssignment{ + NsxtAlbServiceEngineGroupAssignment: &types.NsxtAlbServiceEngineGroupAssignment{}, + vcdClient: vcdClient, + } + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, assignmentConfig, returnObject.NsxtAlbServiceEngineGroupAssignment, nil) + if err != nil { + return nil, fmt.Errorf("error creating NSX-T ALB Service Engine Group Assignment: %s", err) + } + + return returnObject, nil +} + +// Update updates existing ALB Service Engine Group Assignment with new supplied assignmentConfig configuration +func (nsxtEdgeAlbServiceEngineGroup *NsxtAlbServiceEngineGroupAssignment) Update(assignmentConfig *types.NsxtAlbServiceEngineGroupAssignment) (*NsxtAlbServiceEngineGroupAssignment, error) { + client := nsxtEdgeAlbServiceEngineGroup.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroupAssignments + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if assignmentConfig.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T ALB Service Engine Group Assignment without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, assignmentConfig.ID) + if err != nil { + return nil, err + } + + responseAlbController := &NsxtAlbServiceEngineGroupAssignment{ + NsxtAlbServiceEngineGroupAssignment: &types.NsxtAlbServiceEngineGroupAssignment{}, + vcdClient: nsxtEdgeAlbServiceEngineGroup.vcdClient, + } + + err = client.OpenApiPutItem(minimumApiVersion, urlRef, nil, assignmentConfig, responseAlbController.NsxtAlbServiceEngineGroupAssignment, nil) + if err != nil { + return nil, fmt.Errorf("error updating NSX-T ALB Service Engine Group Assignment: %s", err) + } + + return responseAlbController, nil +} + +// Delete deletes NSX-T ALB Service Engine Group Assignment +func (nsxtEdgeAlbServiceEngineGroup *NsxtAlbServiceEngineGroupAssignment) Delete() error { + client := nsxtEdgeAlbServiceEngineGroup.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + 
types.OpenApiEndpointAlbServiceEngineGroupAssignments + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if nsxtEdgeAlbServiceEngineGroup.NsxtAlbServiceEngineGroupAssignment.ID == "" { + return fmt.Errorf("cannot delete NSX-T ALB Service Engine Group Assignment without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, nsxtEdgeAlbServiceEngineGroup.NsxtAlbServiceEngineGroupAssignment.ID) + if err != nil { + return err + } + + err = client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + if err != nil { + return fmt.Errorf("error deleting NSX-T ALB Service Engine Group Assignment: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_service_engine_groups.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_service_engine_groups.go new file mode 100644 index 000000000..b0fe9b70b --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_service_engine_groups.go @@ -0,0 +1,256 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "errors" + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtAlbServiceEngineGroup provides virtual service management capabilities for tenants. This entity can be created +// by referencing a backing importable service engine group - NsxtAlbImportableServiceEngineGroups. +// +// A service engine group is an isolation domain that also defines shared service engine properties, such as size, +// network access, and failover. Resources in a service engine group can be used for different virtual services, +// depending on your tenant needs. These resources cannot be shared between different service engine groups. +type NsxtAlbServiceEngineGroup struct { + NsxtAlbServiceEngineGroup *types.NsxtAlbServiceEngineGroup + vcdClient *VCDClient +} + +// GetAllAlbServiceEngineGroups retrieves NSX-T ALB Service Engines with possible filters +// +// Context is not mandatory for this resource. Supported contexts are: +// * Gateway ID (_context==gatewayId) - returns all Load Balancer Service Engine Groups that are accessible to the +// gateway. +// * Assignable Gateway ID (_context=gatewayId;_context==assignable) returns all Load Balancer Service Engine Groups +// that are assignable to the gateway. This filters out any Load Balancer Service Engine groups that are already +// assigned to the gateway or assigned to another gateway if the reservation type is 'DEDICATED’. 
+func (vcdClient *VCDClient) GetAllAlbServiceEngineGroups(context string, queryParameters url.Values) ([]*NsxtAlbServiceEngineGroup, error) { + client := vcdClient.Client + + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Service Engine Groups require System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroups + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + queryParams := copyOrNewUrlValues(queryParameters) + if context != "" { + queryParams = queryParameterFilterAnd(fmt.Sprintf("_context==%s", context), queryParams) + } + typeResponses := []*types.NsxtAlbServiceEngineGroup{{}} + + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParams, &typeResponses, nil) + if err != nil { + return nil, err + } + + wrappedResponses := make([]*NsxtAlbServiceEngineGroup, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAlbServiceEngineGroup{ + NsxtAlbServiceEngineGroup: typeResponses[sliceIndex], + vcdClient: vcdClient, + } + } + + return wrappedResponses, nil +} + +// GetAlbServiceEngineGroupByName returns NSX-T ALB Service Engine by Name +// Context is not mandatory for this resource. Supported contexts are: +// * Gateway ID (_context==gatewayId) - returns all Load Balancer Service Engine Groups that are accessible to the +// gateway. +// * Assignable Gateway ID (_context=gatewayId;_context==assignable) returns all Load Balancer Service Engine Groups +// that are assignable to the gateway. This filters out any Load Balancer Service Engine groups that are already +// assigned to the gateway or assigned to another gateway if the reservation type is 'DEDICATED’. 
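A sketch of the `_context` filter described above: passing the Edge Gateway ID as context returns the Service Engine Groups accessible to that gateway (the assignable variant adds the second `_context` clause described in the doc comment). Assumes a System administrator session and a placeholder gateway URN; the function name is illustrative:

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// gatewayServiceEngineGroups lists the Service Engine Groups visible to one Edge Gateway
// by passing its ID as the `_context` filter value.
func gatewayServiceEngineGroups(vcdClient *govcd.VCDClient, edgeGatewayId string) error {
	seGroups, err := vcdClient.GetAllAlbServiceEngineGroups(edgeGatewayId, nil)
	if err != nil {
		return fmt.Errorf("error listing Service Engine Groups for gateway '%s': %s", edgeGatewayId, err)
	}
	for _, seGroup := range seGroups {
		fmt.Println(seGroup.NsxtAlbServiceEngineGroup.ID)
	}
	return nil
}
```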
+func (vcdClient *VCDClient) GetAlbServiceEngineGroupByName(optionalContext, name string) (*NsxtAlbServiceEngineGroup, error) { + queryParams := copyOrNewUrlValues(nil) + if optionalContext != "" { + queryParams = queryParameterFilterAnd(fmt.Sprintf("_context==%s", optionalContext), queryParams) + } + queryParams.Add("filter", fmt.Sprintf("name==%s", name)) + + albSeGroups, err := vcdClient.GetAllAlbServiceEngineGroups("", queryParams) + if err != nil { + return nil, fmt.Errorf("error retrieving NSX-T ALB Service Engine Group By Name '%s': %s", name, err) + } + + if len(albSeGroups) == 0 { + return nil, fmt.Errorf("%s", ErrorEntityNotFound) + } + + if len(albSeGroups) > 1 { + return nil, fmt.Errorf("more than 1 NSX-T ALB Service Engine Group with Name '%s' found", name) + } + + return albSeGroups[0], nil +} + +// GetAlbServiceEngineGroupById returns importable NSX-T ALB Cloud by ID +func (vcdClient *VCDClient) GetAlbServiceEngineGroupById(id string) (*NsxtAlbServiceEngineGroup, error) { + client := vcdClient.Client + + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Service Engine Groups require System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroups + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + typeResponse := &types.NsxtAlbServiceEngineGroup{} + + err = client.OpenApiGetItem(apiVersion, urlRef, nil, &typeResponse, nil) + if err != nil { + return nil, err + } + + wrappedResponse := &NsxtAlbServiceEngineGroup{ + NsxtAlbServiceEngineGroup: typeResponse, + vcdClient: vcdClient, + } + + return wrappedResponse, nil +} + +func (vcdClient *VCDClient) CreateNsxtAlbServiceEngineGroup(albServiceEngineGroup *types.NsxtAlbServiceEngineGroup) (*NsxtAlbServiceEngineGroup, error) { + client := vcdClient.Client + if !client.IsSysAdmin { + return nil, errors.New("handling NSX-T ALB Service Engine Groups require System user") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroups + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnObject := &NsxtAlbServiceEngineGroup{ + NsxtAlbServiceEngineGroup: &types.NsxtAlbServiceEngineGroup{}, + vcdClient: vcdClient, + } + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, albServiceEngineGroup, returnObject.NsxtAlbServiceEngineGroup, nil) + if err != nil { + return nil, fmt.Errorf("error creating NSX-T ALB Service Engine Group: %s", err) + } + + return returnObject, nil +} + +// Update updates existing ALB Controller with new supplied albControllerConfig configuration +func (nsxtAlbServiceEngineGroup *NsxtAlbServiceEngineGroup) Update(albSEGroupConfig *types.NsxtAlbServiceEngineGroup) (*NsxtAlbServiceEngineGroup, error) { + client := nsxtAlbServiceEngineGroup.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroups + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if albSEGroupConfig.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T ALB Service Engine Group without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, albSEGroupConfig.ID) + if err != nil { + 
return nil, err + } + + responseAlbController := &NsxtAlbServiceEngineGroup{ + NsxtAlbServiceEngineGroup: &types.NsxtAlbServiceEngineGroup{}, + vcdClient: nsxtAlbServiceEngineGroup.vcdClient, + } + + err = client.OpenApiPutItem(minimumApiVersion, urlRef, nil, albSEGroupConfig, responseAlbController.NsxtAlbServiceEngineGroup, nil) + if err != nil { + return nil, fmt.Errorf("error updating NSX-T ALB Service Engine Group: %s", err) + } + + return responseAlbController, nil +} + +// Delete deletes NSX-T ALB Service Engine Group configuration +func (nsxtAlbServiceEngineGroup *NsxtAlbServiceEngineGroup) Delete() error { + client := nsxtAlbServiceEngineGroup.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroups + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if nsxtAlbServiceEngineGroup.NsxtAlbServiceEngineGroup.ID == "" { + return fmt.Errorf("cannot delete NSX-T ALB Service Engine Group without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, nsxtAlbServiceEngineGroup.NsxtAlbServiceEngineGroup.ID) + if err != nil { + return err + } + + err = client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + if err != nil { + return fmt.Errorf("error deleting NSX-T ALB Service Engine Group: %s", err) + } + + return nil +} + +// Sync syncs a specified Load Balancer Service Engine Group. It requests the HA mode and the maximum number of +// supported Virtual Services for this Service Engine Group from the Load Balancer, and updates vCD's local record of +// these properties. +func (nsxtAlbServiceEngineGroup *NsxtAlbServiceEngineGroup) Sync() error { + client := nsxtAlbServiceEngineGroup.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroups + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if nsxtAlbServiceEngineGroup.NsxtAlbServiceEngineGroup.ID == "" { + return fmt.Errorf("cannot sync NSX-T ALB Service Engine Group without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, nsxtAlbServiceEngineGroup.NsxtAlbServiceEngineGroup.ID, "/sync") + if err != nil { + return err + } + + task, err := client.OpenApiPostItemAsync(minimumApiVersion, urlRef, nil, nil) + if err != nil { + return fmt.Errorf("error syncing NSX-T ALB Service Engine Group: %s", err) + } + + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("sync task for NSX-T ALB Service Engine Group failed: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_settings.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_settings.go new file mode 100644 index 000000000..b763f9edd --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_settings.go @@ -0,0 +1,66 @@ +package govcd + +import ( + "fmt" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// GetAlbSettings retrieves NSX-T ALB settings for a particular Edge Gateway +func (egw *NsxtEdgeGateway) GetAlbSettings() (*types.NsxtAlbConfig, error) { + client := egw.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbEdgeGateway + apiVersion, err := client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, 
egw.EdgeGateway.ID)) + if err != nil { + return nil, err + } + + typeResponse := &types.NsxtAlbConfig{} + err = client.OpenApiGetItem(apiVersion, urlRef, nil, &typeResponse, nil) + if err != nil { + return nil, err + } + + return typeResponse, nil +} + +// UpdateAlbSettings updates NSX-T ALB settings for a particular Edge Gateway +func (egw *NsxtEdgeGateway) UpdateAlbSettings(config *types.NsxtAlbConfig) (*types.NsxtAlbConfig, error) { + client := egw.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbEdgeGateway + apiVersion, err := client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, egw.EdgeGateway.ID)) + if err != nil { + return nil, err + } + + typeResponse := &types.NsxtAlbConfig{} + err = client.OpenApiPutItem(apiVersion, urlRef, nil, config, typeResponse, nil) + if err != nil { + return nil, err + } + + return typeResponse, nil +} + +// DisableAlb is a shortcut wrapping UpdateAlbSettings which disables ALB configuration +func (egw *NsxtEdgeGateway) DisableAlb() error { + config := &types.NsxtAlbConfig{ + Enabled: false, + } + _, err := egw.UpdateAlbSettings(config) + if err != nil { + return fmt.Errorf("error disabling NSX-T ALB: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_virtual_service.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_virtual_service.go new file mode 100644 index 000000000..a59eca621 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_alb_virtual_service.go @@ -0,0 +1,214 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtAlbVirtualService combines Load Balancer Pools with Service Engine Groups and exposes a virtual service on +// defined VIP (virtual IP address) while optionally allowing to use encrypted traffic +type NsxtAlbVirtualService struct { + NsxtAlbVirtualService *types.NsxtAlbVirtualService + vcdClient *VCDClient +} + +// GetAllAlbVirtualServiceSummaries returns a limited subset of NsxtAlbVirtualService values, but does it in single +// query. To fetch complete information for ALB Virtual Services one can use GetAllAlbVirtualServices(), but it is slower +// as it has to retrieve Virtual Services one by one. 
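+//
+// Example (illustrative sketch only, not part of the upstream source; it assumes an authenticated
+// *VCDClient named vcdClient and an Edge Gateway ID held in the string edgeGatewayId):
+//
+//	summaries, err := vcdClient.GetAllAlbVirtualServiceSummaries(edgeGatewayId, nil)
+//	if err != nil {
+//		return fmt.Errorf("error listing ALB Virtual Service summaries: %s", err)
+//	}
+//	for _, vs := range summaries {
+//		fmt.Println(vs.NsxtAlbVirtualService.ID)
+//	}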
+func (vcdClient *VCDClient) GetAllAlbVirtualServiceSummaries(edgeGatewayId string, queryParameters url.Values) ([]*NsxtAlbVirtualService, error) { + client := vcdClient.Client + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbVirtualServiceSummaries + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, edgeGatewayId)) + if err != nil { + return nil, err + } + + typeResponses := []*types.NsxtAlbVirtualService{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into NsxtAlbPool types with client + wrappedResponses := make([]*NsxtAlbVirtualService, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAlbVirtualService{ + NsxtAlbVirtualService: typeResponses[sliceIndex], + vcdClient: vcdClient, + } + } + + return wrappedResponses, nil +} + +// GetAllAlbVirtualServices fetches ALB Virtual Services by at first listing all Virtual Services summaries and then +// fetching complete structure one by one +func (vcdClient *VCDClient) GetAllAlbVirtualServices(edgeGatewayId string, queryParameters url.Values) ([]*NsxtAlbVirtualService, error) { + allAlbVirtualServiceSummaries, err := vcdClient.GetAllAlbVirtualServiceSummaries(edgeGatewayId, queryParameters) + if err != nil { + return nil, fmt.Errorf("error retrieving all ALB Virtual Service summaries: %s", err) + } + + // Loop over all Summaries and retrieve complete information + allAlbVirtualServices := make([]*NsxtAlbVirtualService, len(allAlbVirtualServiceSummaries)) + for index := range allAlbVirtualServiceSummaries { + allAlbVirtualServices[index], err = vcdClient.GetAlbVirtualServiceById(allAlbVirtualServiceSummaries[index].NsxtAlbVirtualService.ID) + if err != nil { + return nil, fmt.Errorf("error retrieving complete ALB Virtual Service: %s", err) + } + + } + + return allAlbVirtualServices, nil +} + +// GetAlbVirtualServiceByName fetches ALB Virtual Service By Name +func (vcdClient *VCDClient) GetAlbVirtualServiceByName(edgeGatewayId string, name string) (*NsxtAlbVirtualService, error) { + queryParameters := copyOrNewUrlValues(nil) + queryParameters.Add("filter", "name=="+name) + + allAlbVirtualServices, err := vcdClient.GetAllAlbVirtualServices(edgeGatewayId, queryParameters) + if err != nil { + return nil, fmt.Errorf("error reading ALB Virtual Service with Name '%s': %s", name, err) + } + + if len(allAlbVirtualServices) == 0 { + return nil, fmt.Errorf("%s: could not find ALB Virtual Service with Name '%s'", ErrorEntityNotFound, name) + } + + if len(allAlbVirtualServices) > 1 { + return nil, fmt.Errorf("found more than 1 ALB Virtual Service with Name '%s'", name) + } + + return allAlbVirtualServices[0], nil +} + +// GetAlbVirtualServiceById fetches ALB Virtual Service By ID +func (vcdClient *VCDClient) GetAlbVirtualServiceById(id string) (*NsxtAlbVirtualService, error) { + client := vcdClient.Client + + if id == "" { + return nil, fmt.Errorf("ID is required to lookup NSX-T ALB Virtual Service by ID") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbVirtualServices + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + typeResponse := 
&types.NsxtAlbVirtualService{} + err = client.OpenApiGetItem(apiVersion, urlRef, nil, &typeResponse, nil) + if err != nil { + return nil, err + } + + wrappedResponse := &NsxtAlbVirtualService{ + NsxtAlbVirtualService: typeResponse, + vcdClient: vcdClient, + } + + return wrappedResponse, nil +} + +// CreateNsxtAlbVirtualService creates NSX-T ALB Virtual Service based on supplied configuration +func (vcdClient *VCDClient) CreateNsxtAlbVirtualService(albVirtualServiceConfig *types.NsxtAlbVirtualService) (*NsxtAlbVirtualService, error) { + client := vcdClient.Client + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbVirtualServices + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnObject := &NsxtAlbVirtualService{ + NsxtAlbVirtualService: &types.NsxtAlbVirtualService{}, + vcdClient: vcdClient, + } + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, albVirtualServiceConfig, returnObject.NsxtAlbVirtualService, nil) + if err != nil { + return nil, fmt.Errorf("error creating NSX-T ALB Virtual Service: %s", err) + } + + return returnObject, nil +} + +// Update updates NSX-T ALB Virtual Service based on supplied configuration +func (nsxtAlbVirtualService *NsxtAlbVirtualService) Update(albVirtualServiceConfig *types.NsxtAlbVirtualService) (*NsxtAlbVirtualService, error) { + client := nsxtAlbVirtualService.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbVirtualServices + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if albVirtualServiceConfig.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T ALB Virtual Service without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, albVirtualServiceConfig.ID) + if err != nil { + return nil, err + } + + responseAlbController := &NsxtAlbVirtualService{ + NsxtAlbVirtualService: &types.NsxtAlbVirtualService{}, + vcdClient: nsxtAlbVirtualService.vcdClient, + } + + err = client.OpenApiPutItem(minimumApiVersion, urlRef, nil, albVirtualServiceConfig, responseAlbController.NsxtAlbVirtualService, nil) + if err != nil { + return nil, fmt.Errorf("error updating NSX-T ALB Virtual Service: %s", err) + } + + return responseAlbController, nil +} + +// Delete deletes NSX-T ALB Virtual Service +func (nsxtAlbVirtualService *NsxtAlbVirtualService) Delete() error { + client := nsxtAlbVirtualService.vcdClient.Client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbVirtualServices + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if nsxtAlbVirtualService.NsxtAlbVirtualService.ID == "" { + return fmt.Errorf("cannot delete NSX-T ALB Virtual Service without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, nsxtAlbVirtualService.NsxtAlbVirtualService.ID) + if err != nil { + return err + } + + err = client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + if err != nil { + return fmt.Errorf("error deleting NSX-T ALB Virtual Service: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_application_profile.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_application_profile.go new file mode 100644 index 000000000..ca55d3b68 
--- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_application_profile.go @@ -0,0 +1,219 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtAppPortProfile uses OpenAPI endpoint to operate NSX-T Application Port Profiles +// It can have 3 types of scopes: +// * SYSTEM - Read-only (The ones that are provided by SYSTEM). Constant `types.ApplicationPortProfileScopeSystem` +// * PROVIDER - Created by Provider on a particular network provider (NSX-T manager). Constant `types.ApplicationPortProfileScopeProvider` +// * TENANT (Created by Tenant at Org VDC level). Constant `types.ApplicationPortProfileScopeTenant` +// +// More details about scope in documentation for types.NsxtAppPortProfile +type NsxtAppPortProfile struct { + NsxtAppPortProfile *types.NsxtAppPortProfile + client *Client +} + +// CreateNsxtAppPortProfile allows users to create NSX-T Application Port Profile definition. +// It can have 3 types of scopes: +// * SYSTEM (The ones that are provided by SYSTEM) Read-only +// * PROVIDER (Created by Provider globally) +// * TENANT (Create by tenant at Org level) +// More details about scope in documentation for types.NsxtAppPortProfile +func (org *Org) CreateNsxtAppPortProfile(appPortProfileConfig *types.NsxtAppPortProfile) (*NsxtAppPortProfile, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAppPortProfiles + minimumApiVersion, err := org.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := org.client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnObject := &NsxtAppPortProfile{ + NsxtAppPortProfile: &types.NsxtAppPortProfile{}, + client: org.client, + } + + err = org.client.OpenApiPostItem(minimumApiVersion, urlRef, nil, appPortProfileConfig, returnObject.NsxtAppPortProfile, nil) + if err != nil { + return nil, fmt.Errorf("error creating NSX-T Application Port Profile: %s", err) + } + + return returnObject, nil +} + +// GetAllNsxtAppPortProfiles returns all NSX-T Application Port Profiles for specific scope +// More details about scope in documentation for types.NsxtAppPortProfile +func (org *Org) GetAllNsxtAppPortProfiles(queryParameters url.Values, scope string) ([]*NsxtAppPortProfile, error) { + queryParams := copyOrNewUrlValues(queryParameters) + if scope != "" { + queryParams = queryParameterFilterAnd("scope=="+scope, queryParams) + } + + return getAllNsxtAppPortProfiles(org.client, queryParams) +} + +// GetNsxtAppPortProfileByName allows users to retrieve Application Port Profiles for specific scope. +// More details in documentation for types.NsxtAppPortProfile +// +// Note. 
Names are enforced to be unique per scope +func (org *Org) GetNsxtAppPortProfileByName(name, scope string) (*NsxtAppPortProfile, error) { + queryParameters := url.Values{} + if scope != "" { + queryParameters = queryParameterFilterAnd("scope=="+scope, queryParameters) + } + + return getNsxtAppPortProfileByName(org.client, name, queryParameters) +} + +// GetNsxtAppPortProfileById retrieves NSX-T Application Port Profile by ID +func (org *Org) GetNsxtAppPortProfileById(id string) (*NsxtAppPortProfile, error) { + return getNsxtAppPortProfileById(org.client, id) +} + +// Update allows users to update NSX-T Application Port Profile +func (appPortProfile *NsxtAppPortProfile) Update(appPortProfileConfig *types.NsxtAppPortProfile) (*NsxtAppPortProfile, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAppPortProfiles + minimumApiVersion, err := appPortProfile.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if appPortProfileConfig.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T Application Port Profile without ID") + } + + urlRef, err := appPortProfile.client.OpenApiBuildEndpoint(endpoint, appPortProfileConfig.ID) + if err != nil { + return nil, err + } + + returnObject := &NsxtAppPortProfile{ + NsxtAppPortProfile: &types.NsxtAppPortProfile{}, + client: appPortProfile.client, + } + + err = appPortProfile.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, appPortProfileConfig, returnObject.NsxtAppPortProfile, nil) + if err != nil { + return nil, fmt.Errorf("error updating NSX-T Application Port Profile : %s", err) + } + + return returnObject, nil +} + +// Delete allows users to delete NSX-T Application Port Profile +func (appPortProfile *NsxtAppPortProfile) Delete() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAppPortProfiles + minimumApiVersion, err := appPortProfile.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if appPortProfile.NsxtAppPortProfile.ID == "" { + return fmt.Errorf("cannot delete NSX-T Application Port Profile without ID") + } + + urlRef, err := appPortProfile.client.OpenApiBuildEndpoint(endpoint, appPortProfile.NsxtAppPortProfile.ID) + if err != nil { + return err + } + + err = appPortProfile.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting NSX-T Application Port Profile: %s", err) + } + + return nil +} + +func getNsxtAppPortProfileByName(client *Client, name string, queryParameters url.Values) (*NsxtAppPortProfile, error) { + queryParams := copyOrNewUrlValues(queryParameters) + queryParams = queryParameterFilterAnd("name=="+name, queryParams) + + allAppPortProfiles, err := getAllNsxtAppPortProfiles(client, queryParams) + if err != nil { + return nil, fmt.Errorf("could not find NSX-T Application Port Profile with name '%s': %s", name, err) + } + + if len(allAppPortProfiles) == 0 { + return nil, fmt.Errorf("%s: expected exactly one NSX-T Application Port Profile with name '%s'. Got %d", ErrorEntityNotFound, name, len(allAppPortProfiles)) + } + + if len(allAppPortProfiles) > 1 { + return nil, fmt.Errorf("expected exactly one NSX-T Application Port Profile with name '%s'. 
Got %d", name, len(allAppPortProfiles)) + } + + return getNsxtAppPortProfileById(client, allAppPortProfiles[0].NsxtAppPortProfile.ID) +} + +func getNsxtAppPortProfileById(client *Client, id string) (*NsxtAppPortProfile, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAppPortProfiles + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty NSX-T Application Port Profile ID specified") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + appPortProfile := &NsxtAppPortProfile{ + NsxtAppPortProfile: &types.NsxtAppPortProfile{}, + client: client, + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, nil, appPortProfile.NsxtAppPortProfile, nil) + if err != nil { + return nil, err + } + + return appPortProfile, nil +} + +func getAllNsxtAppPortProfiles(client *Client, queryParameters url.Values) ([]*NsxtAppPortProfile, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAppPortProfiles + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.NsxtAppPortProfile{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into NsxtAppPortProfile types with client + wrappedResponses := make([]*NsxtAppPortProfile, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtAppPortProfile{ + NsxtAppPortProfile: typeResponses[sliceIndex], + client: client, + } + } + + return wrappedResponses, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_edge_cluster.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_edge_cluster.go new file mode 100644 index 000000000..09991a7d0 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_edge_cluster.go @@ -0,0 +1,115 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtEdgeCluster is a logical grouping of NSX-T Edge virtual machines. +type NsxtEdgeCluster struct { + NsxtEdgeCluster *types.NsxtEdgeCluster + client *Client +} + +// GetNsxtEdgeClusterByName retrieves a particular NSX-T Edge Cluster by name available for that VDC +// Note: Multiple NSX-T Edge Clusters with the same name may exist. +func (vdc *Vdc) GetNsxtEdgeClusterByName(name string) (*NsxtEdgeCluster, error) { + if name == "" { + return nil, fmt.Errorf("empty NSX-T Edge Cluster name specified") + } + + // Ideally FIQL filter could be used to filter on server side and get only desired result, but filtering on + // 'name' is not yet supported. The only supported field for filtering is + // _context==urn:vcloud:vdc:09722307-aee0-4623-af95-7f8e577c9ebc to specify parent Org VDC (This + // automatically happens in GetAllNsxtEdgeClusters()). The below filter injection is left as documentation. 
+ /* + queryParameters := copyOrNewUrlValues(nil) + queryParameters.Add("filter", "name=="+name) + */ + + nsxtEdgeClusters, err := vdc.GetAllNsxtEdgeClusters(nil) + if err != nil { + return nil, fmt.Errorf("could not find NSX-T Edge Cluster with name '%s' for Org VDC with id '%s': %s", + name, vdc.Vdc.ID, err) + } + + // TODO remove this when FIQL supports filtering on 'name' + nsxtEdgeClusters = filterNsxtEdgeClusters(name, nsxtEdgeClusters) + // EOF TODO remove this when FIQL supports filtering on 'name' + + if len(nsxtEdgeClusters) == 0 { + // ErrorEntityNotFound is injected here for the ability to validate problem using ContainsNotFound() + return nil, fmt.Errorf("%s: no NSX-T Tier-0 Edge Cluster with name '%s' for Org VDC with id '%s' found", + ErrorEntityNotFound, name, vdc.Vdc.ID) + } + + if len(nsxtEdgeClusters) > 1 { + return nil, fmt.Errorf("more than one (%d) NSX-T Edge Cluster with name '%s' for Org VDC with id '%s' found", + len(nsxtEdgeClusters), name, vdc.Vdc.ID) + } + + return nsxtEdgeClusters[0], nil +} + +// filterNsxtEdgeClusters is a helper to filter NSX-T Edge Clusters by name because the FIQL filter does not support +// filtering by name. +func filterNsxtEdgeClusters(name string, allNnsxtEdgeCluster []*NsxtEdgeCluster) []*NsxtEdgeCluster { + filteredNsxtEdgeClusters := make([]*NsxtEdgeCluster, 0) + for index, nsxtEdgeCluster := range allNnsxtEdgeCluster { + if allNnsxtEdgeCluster[index].NsxtEdgeCluster.Name == name { + filteredNsxtEdgeClusters = append(filteredNsxtEdgeClusters, nsxtEdgeCluster) + } + } + + return filteredNsxtEdgeClusters + +} + +// GetAllNsxtEdgeClusters retrieves all available Edge Clusters for a particular VDC +func (vdc *Vdc) GetAllNsxtEdgeClusters(queryParameters url.Values) ([]*NsxtEdgeCluster, error) { + if vdc.Vdc.ID == "" { + return nil, fmt.Errorf("VDC must have ID populated to retrieve NSX-T edge clusters") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointEdgeClusters + minimumApiVersion, err := vdc.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := vdc.client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + // Get all NSX-T Edge clusters that are accessible to an organization VDC. The “_context” filter key must be set with + // the ID of the VDC for which we want to get available Edge Clusters for. 
+ // + // _context==urn:vcloud:vdc:09722307-aee0-4623-af95-7f8e577c9ebc + + // Create a copy of queryParameters so that original queryParameters are not mutated (because a map is always a + // reference) + queryParams := queryParameterFilterAnd("_context=="+vdc.Vdc.ID, queryParameters) + + typeResponses := []*types.NsxtEdgeCluster{{}} + err = vdc.client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParams, &typeResponses, nil) + if err != nil { + return nil, err + } + + returnObjects := make([]*NsxtEdgeCluster, len(typeResponses)) + for sliceIndex := range typeResponses { + returnObjects[sliceIndex] = &NsxtEdgeCluster{ + NsxtEdgeCluster: typeResponses[sliceIndex], + client: vdc.client, + } + } + + return returnObjects, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_edgegateway.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_edgegateway.go new file mode 100644 index 000000000..000c5f2d6 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_edgegateway.go @@ -0,0 +1,302 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtEdgeGateway uses OpenAPI endpoint to operate NSX-T Edge Gateways +type NsxtEdgeGateway struct { + EdgeGateway *types.OpenAPIEdgeGateway + client *Client +} + +// GetNsxtEdgeGatewayById allows to retrieve NSX-T edge gateway by ID for Org admins +func (adminOrg *AdminOrg) GetNsxtEdgeGatewayById(id string) (*NsxtEdgeGateway, error) { + return getNsxtEdgeGatewayById(adminOrg.client, id, nil) +} + +// GetNsxtEdgeGatewayById allows to retrieve NSX-T edge gateway by ID for Org users +func (org *Org) GetNsxtEdgeGatewayById(id string) (*NsxtEdgeGateway, error) { + return getNsxtEdgeGatewayById(org.client, id, nil) +} + +// GetNsxtEdgeGatewayById allows to retrieve NSX-T edge gateway by ID for specific VDC +func (vdc *Vdc) GetNsxtEdgeGatewayById(id string) (*NsxtEdgeGateway, error) { + params := url.Values{} + filterParams := queryParameterFilterAnd("orgVdc.id=="+vdc.Vdc.ID, params) + egw, err := getNsxtEdgeGatewayById(vdc.client, id, filterParams) + if err != nil { + return nil, err + } + + if egw.EdgeGateway.OrgVdc.ID != vdc.Vdc.ID { + return nil, fmt.Errorf("%s: no NSX-T Edge Gateway with ID '%s' found in VDC '%s'", + ErrorEntityNotFound, id, vdc.Vdc.ID) + } + + return egw, nil +} + +// GetNsxtEdgeGatewayByName allows to retrieve NSX-T edge gateway by Name for Org admins +func (adminOrg *AdminOrg) GetNsxtEdgeGatewayByName(name string) (*NsxtEdgeGateway, error) { + queryParameters := url.Values{} + queryParameters.Add("filter", "name=="+name) + + allEdges, err := adminOrg.GetAllNsxtEdgeGateways(queryParameters) + if err != nil { + return nil, fmt.Errorf("unable to retrieve Edge Gateway by name '%s': %s", name, err) + } + + onlyNsxtEdges := filterOnlyNsxtEdges(allEdges) + + return returnSingleNsxtEdgeGateway(name, onlyNsxtEdges) +} + +// GetNsxtEdgeGatewayByName allows to retrieve NSX-T edge gateway by Name for Org admins +func (org *Org) GetNsxtEdgeGatewayByName(name string) (*NsxtEdgeGateway, error) { + queryParameters := url.Values{} + queryParameters.Add("filter", "name=="+name) + + allEdges, err := org.GetAllNsxtEdgeGateways(queryParameters) + if err != nil { + return nil, fmt.Errorf("unable to retrieve Edge Gateway by name '%s': %s", name, err) + } + + 
onlyNsxtEdges := filterOnlyNsxtEdges(allEdges) + + return returnSingleNsxtEdgeGateway(name, onlyNsxtEdges) +} + +// GetNsxtEdgeGatewayByName allows to retrieve NSX-T edge gateway by Name for specific VDC +func (vdc *Vdc) GetNsxtEdgeGatewayByName(name string) (*NsxtEdgeGateway, error) { + queryParameters := url.Values{} + queryParameters.Add("filter", "name=="+name) + + allEdges, err := vdc.GetAllNsxtEdgeGateways(queryParameters) + if err != nil { + return nil, fmt.Errorf("unable to retrieve Edge Gateway by name '%s': %s", name, err) + } + + return returnSingleNsxtEdgeGateway(name, allEdges) +} + +// GetAllNsxtEdgeGateways allows to retrieve all NSX-T edge gateways for Org Admins +func (adminOrg *AdminOrg) GetAllNsxtEdgeGateways(queryParameters url.Values) ([]*NsxtEdgeGateway, error) { + return getAllNsxtEdgeGateways(adminOrg.client, queryParameters) +} + +// GetAllNsxtEdgeGateways allows to retrieve all NSX-T edge gateways for Org users +func (org *Org) GetAllNsxtEdgeGateways(queryParameters url.Values) ([]*NsxtEdgeGateway, error) { + return getAllNsxtEdgeGateways(org.client, queryParameters) +} + +// GetAllNsxtEdgeGateways allows to retrieve all NSX-T edge gateways for specific VDC +func (vdc *Vdc) GetAllNsxtEdgeGateways(queryParameters url.Values) ([]*NsxtEdgeGateway, error) { + filteredQueryParams := queryParameterFilterAnd("orgVdc.id=="+vdc.Vdc.ID, queryParameters) + return getAllNsxtEdgeGateways(vdc.client, filteredQueryParams) +} + +// CreateNsxtEdgeGateway allows to create NSX-T edge gateway for Org admins +func (adminOrg *AdminOrg) CreateNsxtEdgeGateway(edgeGatewayConfig *types.OpenAPIEdgeGateway) (*NsxtEdgeGateway, error) { + if !adminOrg.client.IsSysAdmin { + return nil, fmt.Errorf("only System Administrator can create Edge Gateway") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointEdgeGateways + minimumApiVersion, err := adminOrg.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := adminOrg.client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnEgw := &NsxtEdgeGateway{ + EdgeGateway: &types.OpenAPIEdgeGateway{}, + client: adminOrg.client, + } + + err = adminOrg.client.OpenApiPostItem(minimumApiVersion, urlRef, nil, edgeGatewayConfig, returnEgw.EdgeGateway, nil) + if err != nil { + return nil, fmt.Errorf("error creating Edge Gateway: %s", err) + } + + return returnEgw, nil +} + +// Update allows to update NSX-T edge gateway for Org admins +func (egw *NsxtEdgeGateway) Update(edgeGatewayConfig *types.OpenAPIEdgeGateway) (*NsxtEdgeGateway, error) { + if !egw.client.IsSysAdmin { + return nil, fmt.Errorf("only System Administrator can update Edge Gateway") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointEdgeGateways + minimumApiVersion, err := egw.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if edgeGatewayConfig.ID == "" { + return nil, fmt.Errorf("cannot update Edge Gateway without ID") + } + + urlRef, err := egw.client.OpenApiBuildEndpoint(endpoint, edgeGatewayConfig.ID) + if err != nil { + return nil, err + } + + returnEgw := &NsxtEdgeGateway{ + EdgeGateway: &types.OpenAPIEdgeGateway{}, + client: egw.client, + } + + err = egw.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, edgeGatewayConfig, returnEgw.EdgeGateway, nil) + if err != nil { + return nil, fmt.Errorf("error updating Edge Gateway: %s", err) + } + + return returnEgw, nil +} + +// Delete allows to delete NSX-T edge gateway for 
sysadmins +func (egw *NsxtEdgeGateway) Delete() error { + if !egw.client.IsSysAdmin { + return fmt.Errorf("only Provider can delete Edge Gateway") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointEdgeGateways + minimumApiVersion, err := egw.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if egw.EdgeGateway.ID == "" { + return fmt.Errorf("cannot delete Edge Gateway without ID") + } + + urlRef, err := egw.client.OpenApiBuildEndpoint(endpoint, egw.EdgeGateway.ID) + if err != nil { + return err + } + + err = egw.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting Edge Gateway: %s", err) + } + + return nil +} + +// getNsxtEdgeGatewayById is a private parent for wrapped functions: +// func (adminOrg *AdminOrg) GetNsxtEdgeGatewayByName(id string) (*NsxtEdgeGateway, error) +// func (org *Org) GetNsxtEdgeGatewayByName(id string) (*NsxtEdgeGateway, error) +// func (vdc *Vdc) GetNsxtEdgeGatewayById(id string) (*NsxtEdgeGateway, error) +func getNsxtEdgeGatewayById(client *Client, id string, queryParameters url.Values) (*NsxtEdgeGateway, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointEdgeGateways + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty Edge Gateway ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + egw := &NsxtEdgeGateway{ + EdgeGateway: &types.OpenAPIEdgeGateway{}, + client: client, + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, queryParameters, egw.EdgeGateway, nil) + if err != nil { + return nil, err + } + + if egw.EdgeGateway.GatewayBacking.GatewayType != "NSXT_BACKED" { + return nil, fmt.Errorf("%s: this is not NSX-T Edge Gateway (%s)", + ErrorEntityNotFound, egw.EdgeGateway.GatewayBacking.GatewayType) + } + + return egw, nil +} + +// returnSingleNsxtEdgeGateway helps to reduce code duplication for `GetNsxtEdgeGatewayByName` functions with different +// receivers +func returnSingleNsxtEdgeGateway(name string, allEdges []*NsxtEdgeGateway) (*NsxtEdgeGateway, error) { + if len(allEdges) > 1 { + return nil, fmt.Errorf("got more than 1 Edge Gateway by name '%s' %d", name, len(allEdges)) + } + + if len(allEdges) < 1 { + return nil, fmt.Errorf("%s: got 0 Edge Gateways by name '%s'", ErrorEntityNotFound, name) + } + + return allEdges[0], nil +} + +// getAllNsxtEdgeGateways is a private parent for wrapped functions: +// func (adminOrg *AdminOrg) GetAllNsxtEdgeGateways(queryParameters url.Values) ([]*NsxtEdgeGateway, error) +// func (org *Org) GetAllNsxtEdgeGateways(queryParameters url.Values) ([]*NsxtEdgeGateway, error) +// func (vdc *Vdc) GetAllNsxtEdgeGateways(queryParameters url.Values) ([]*NsxtEdgeGateway, error) +func getAllNsxtEdgeGateways(client *Client, queryParameters url.Values) ([]*NsxtEdgeGateway, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointEdgeGateways + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.OpenAPIEdgeGateway{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into 
NsxtEdgeGateway types with client + wrappedResponses := make([]*NsxtEdgeGateway, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtEdgeGateway{ + EdgeGateway: typeResponses[sliceIndex], + client: client, + } + } + + onlyNsxtEdges := filterOnlyNsxtEdges(wrappedResponses) + + return onlyNsxtEdges, nil +} + +// filterOnlyNsxtEdges filters our list of edge gateways only for NSXT_BACKED ones because original endpoint can +// return NSX-V and NSX-T backed edge gateways. +func filterOnlyNsxtEdges(allEdges []*NsxtEdgeGateway) []*NsxtEdgeGateway { + filteredEdges := make([]*NsxtEdgeGateway, 0) + + for index := range allEdges { + if allEdges[index] != nil && allEdges[index].EdgeGateway != nil && + allEdges[index].EdgeGateway.GatewayBacking != nil && + allEdges[index].EdgeGateway.GatewayBacking.GatewayType == "NSXT_BACKED" { + filteredEdges = append(filteredEdges, allEdges[index]) + } + } + + return filteredEdges +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_firewall.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_firewall.go new file mode 100644 index 000000000..4fe9daf48 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_firewall.go @@ -0,0 +1,136 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtFirewall contains a types.NsxtFirewallRuleContainer which encloses three types of rules - +// system, default and user defined rules. User defined rules are the only ones that can be modified, others are +// read-only. +type NsxtFirewall struct { + NsxtFirewallRuleContainer *types.NsxtFirewallRuleContainer + client *Client + // edgeGatewayId is stored for usage in NsxtFirewall receiver functions + edgeGatewayId string +} + +// UpdateNsxtFirewall allows user to set new firewall rules or update existing ones. The API does not have POST endpoint +// and always uses PUT endpoint for creating and updating. 
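+//
+// Example (illustrative sketch only, not part of the upstream source; it assumes an *NsxtEdgeGateway
+// named egw and a desired rule set held in fwRules of type *types.NsxtFirewallRuleContainer):
+//
+//	fw, err := egw.UpdateNsxtFirewall(fwRules)
+//	if err != nil {
+//		return fmt.Errorf("error setting NSX-T Firewall rules: %s", err)
+//	}
+//	// fw.NsxtFirewallRuleContainer now holds the rules as stored by VCD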
+func (egw *NsxtEdgeGateway) UpdateNsxtFirewall(firewallRules *types.NsxtFirewallRuleContainer) (*NsxtFirewall, error) { + client := egw.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtFirewallRules + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + // Insert Edge Gateway ID into endpoint path edgeGateways/%s/firewall/rules + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, egw.EdgeGateway.ID)) + if err != nil { + return nil, err + } + + returnObject := &NsxtFirewall{ + NsxtFirewallRuleContainer: &types.NsxtFirewallRuleContainer{}, + client: client, + edgeGatewayId: egw.EdgeGateway.ID, + } + + err = client.OpenApiPutItem(minimumApiVersion, urlRef, nil, firewallRules, returnObject.NsxtFirewallRuleContainer, nil) + if err != nil { + return nil, fmt.Errorf("error setting NSX-T Firewall: %s", err) + } + + return returnObject, nil +} + +// GetNsxtFirewall retrieves all firewall rules system, default and user defined rules +func (egw *NsxtEdgeGateway) GetNsxtFirewall() (*NsxtFirewall, error) { + client := egw.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtFirewallRules + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + // Insert Edge Gateway ID into endpoint path edgeGateways/%s/firewall/rules + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, egw.EdgeGateway.ID)) + if err != nil { + return nil, err + } + + returnObject := &NsxtFirewall{ + NsxtFirewallRuleContainer: &types.NsxtFirewallRuleContainer{}, + client: client, + edgeGatewayId: egw.EdgeGateway.ID, + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, nil, returnObject.NsxtFirewallRuleContainer, nil) + if err != nil { + return nil, fmt.Errorf("error retrieving NSX-T Firewall rules: %s", err) + } + + // Store Edge Gateway ID for later operations + returnObject.edgeGatewayId = egw.EdgeGateway.ID + + return returnObject, nil +} + +// DeleteAllRules allows users to delete all NSX-T Firewall rules in a particular Edge Gateway +func (firewall *NsxtFirewall) DeleteAllRules() error { + + if firewall.edgeGatewayId == "" { + return fmt.Errorf("missing Edge Gateway ID") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtFirewallRules + minimumApiVersion, err := firewall.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + urlRef, err := firewall.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, firewall.edgeGatewayId)) + if err != nil { + return err + } + + err = firewall.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting all NSX-T Firewall Rules: %s", err) + } + + return nil +} + +// DeleteRuleById allows users to delete NSX-T Firewall Rule By ID +func (firewall *NsxtFirewall) DeleteRuleById(id string) error { + if id == "" { + return fmt.Errorf("empty ID specified") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtFirewallRules + minimumApiVersion, err := firewall.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + urlRef, err := firewall.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, firewall.edgeGatewayId), "/", id) + if err != nil { + return err + } + + err = firewall.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting NSX-T Firewall Rule with 
ID '%s': %s", id, err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_firewall_group.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_firewall_group.go new file mode 100644 index 000000000..62c7612db --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_firewall_group.go @@ -0,0 +1,376 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "errors" + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtFirewallGroup uses OpenAPI endpoint to operate NSX-T Security Groups and IP Sets which use +// the same Firewall Group API endpoint +// +// IP sets are groups of objects to which the firewall rules apply. Combining multiple objects into +// IP sets helps reduce the total number of firewall rules to be created. +// +// Security groups are groups of Org Vdc networks to which distributed firewall rules apply. +// Grouping networks helps you to reduce the total number of distributed firewall rules to be +// created. +type NsxtFirewallGroup struct { + NsxtFirewallGroup *types.NsxtFirewallGroup + client *Client +} + +// CreateNsxtFirewallGroup allows users to create NSX-T Firewall Group +func (vdc *Vdc) CreateNsxtFirewallGroup(firewallGroupConfig *types.NsxtFirewallGroup) (*NsxtFirewallGroup, error) { + return createNsxtFirewallGroup(vdc.client, firewallGroupConfig) +} + +// CreateNsxtFirewallGroup allows users to create NSX-T Firewall Group +func (egw *NsxtEdgeGateway) CreateNsxtFirewallGroup(firewallGroupConfig *types.NsxtFirewallGroup) (*NsxtFirewallGroup, error) { + return createNsxtFirewallGroup(egw.client, firewallGroupConfig) +} + +// GetAllNsxtFirewallGroups allows users to retrieve all Firewall Groups for Org +// firewallGroupType can be one of the following: +// * types.FirewallGroupTypeSecurityGroup - for NSX-T Security Groups +// * types.FirewallGroupTypeIpSet - for NSX-T IP Sets +// * "" (empty) - search will not be limited and will get both - IP Sets and Security Groups +// +// It is possible to add additional filtering by using queryParameters of type 'url.Values'. +// One special filter is `_context==` filtering. Value can be one of the following: +// +// * Org Vdc Network ID (_context==networkId) - Returns all the firewall groups which the specified +// network is a member of. +// +// * Edge Gateway ID (_context==edgeGatewayId) - Returns all the firewall groups which are available +// to the specific edge gateway. Or use a shorthand NsxtEdgeGateway.GetAllNsxtFirewallGroups() which +// automatically injects this filter. +// +// * Network Provider ID (_context==networkProviderId) - Returns all the firewall groups which are +// available under a specific network provider. This context requires system admin privilege. 
+// 'networkProviderId' is NSX-T manager ID +func (org *Org) GetAllNsxtFirewallGroups(queryParameters url.Values, firewallGroupType string) ([]*NsxtFirewallGroup, error) { + queryParams := copyOrNewUrlValues(queryParameters) + if firewallGroupType != "" { + queryParams = queryParameterFilterAnd("type=="+firewallGroupType, queryParams) + } + + return getAllNsxtFirewallGroups(org.client, queryParams) +} + +// GetAllNsxtFirewallGroups allows users to retrieve all NSX-T Firewall Groups +func (vdc *Vdc) GetAllNsxtFirewallGroups(queryParameters url.Values, firewallGroupType string) ([]*NsxtFirewallGroup, error) { + if vdc.IsNsxv() { + return nil, errors.New("only NSX-T VDCs support Firewall Groups") + } + return getAllNsxtFirewallGroups(vdc.client, queryParameters) +} + +// GetAllNsxtFirewallGroups allows users to retrieve all NSX-T Firewall Groups in a particular Edge Gateway +// firewallGroupType can be one of the following: +// * types.FirewallGroupTypeSecurityGroup - for NSX-T Security Groups +// * types.FirewallGroupTypeIpSet - for NSX-T IP Sets +// * "" (empty) - search will not be limited and will get both - IP Sets and Security Groups +func (egw *NsxtEdgeGateway) GetAllNsxtFirewallGroups(queryParameters url.Values, firewallGroupType string) ([]*NsxtFirewallGroup, error) { + queryParams := copyOrNewUrlValues(queryParameters) + + if firewallGroupType != "" { + queryParams = queryParameterFilterAnd("type=="+firewallGroupType, queryParams) + } + + // Automatically inject Edge Gateway filter because this is an Edge Gateway scoped query + queryParams = queryParameterFilterAnd("_context=="+egw.EdgeGateway.ID, queryParams) + + return getAllNsxtFirewallGroups(egw.client, queryParams) +} + +// GetNsxtFirewallGroupByName allows users to retrieve Firewall Group by Name +// firewallGroupType can be one of the following: +// * types.FirewallGroupTypeSecurityGroup - for NSX-T Security Groups +// * types.FirewallGroupTypeIpSet - for NSX-T IP Sets +// * "" (empty) - search will not be limited and will get both - IP Sets and Security Groups +// +// Note. One might get an error if IP Set and Security Group exist with the same name (two objects +// of the same type cannot exist) and firewallGroupType is left empty. +func (org *Org) GetNsxtFirewallGroupByName(name, firewallGroupType string) (*NsxtFirewallGroup, error) { + queryParameters := url.Values{} + if firewallGroupType != "" { + queryParameters = queryParameterFilterAnd("type=="+firewallGroupType, queryParameters) + } + + return getNsxtFirewallGroupByName(org.client, name, queryParameters) +} + +// GetNsxtFirewallGroupByName allows users to retrieve Firewall Group by Name +// firewallGroupType can be one of the following: +// * types.FirewallGroupTypeSecurityGroup - for NSX-T Security Groups +// * types.FirewallGroupTypeIpSet - for NSX-T IP Sets +// * "" (empty) - search will not be limited and will get both - IP Sets and Security Groups +// +// Note. One might get an error if IP Set and Security Group exist with the same name (two objects +// of the same type cannot exist) and firewallGroupType is left empty. 
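+//
+// Example (illustrative sketch only, not part of the upstream source; it assumes a *Vdc named vdc and
+// an existing IP Set named "my-ip-set"):
+//
+//	ipSet, err := vdc.GetNsxtFirewallGroupByName("my-ip-set", types.FirewallGroupTypeIpSet)
+//	if err != nil {
+//		return fmt.Errorf("error retrieving IP Set: %s", err)
+//	}
+//	fmt.Println(ipSet.NsxtFirewallGroup.ID)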
+func (vdc *Vdc) GetNsxtFirewallGroupByName(name, firewallGroupType string) (*NsxtFirewallGroup, error) { + + queryParameters := url.Values{} + if firewallGroupType != "" { + queryParameters = queryParameterFilterAnd("type=="+firewallGroupType, queryParameters) + } + return getNsxtFirewallGroupByName(vdc.client, name, queryParameters) +} + +// GetNsxtFirewallGroupByName allows users to retrieve Firewall Group by Name in a particular Edge Gateway +// firewallGroupType can be one of the following: +// * types.FirewallGroupTypeSecurityGroup - for NSX-T Security Groups +// * types.FirewallGroupTypeIpSet - for NSX-T IP Sets +// * "" (empty) - search will not be limited and will get both - IP Sets and Security Groups +// +// Note. One might get an error if IP Set and Security Group exist with the same name (two objects +// of the same type cannot exist) and firewallGroupType is left empty. +func (egw *NsxtEdgeGateway) GetNsxtFirewallGroupByName(name string, firewallGroupType string) (*NsxtFirewallGroup, error) { + queryParameters := url.Values{} + + if firewallGroupType != "" { + queryParameters = queryParameterFilterAnd("type=="+firewallGroupType, queryParameters) + } + + // Automatically inject Edge Gateway filter because this is an Edge Gateway scoped query + queryParameters = queryParameterFilterAnd("_context=="+egw.EdgeGateway.ID, queryParameters) + + return getNsxtFirewallGroupByName(egw.client, name, queryParameters) +} + +// GetNsxtFirewallGroupById retrieves NSX-T Firewall Group by ID +func (org *Org) GetNsxtFirewallGroupById(id string) (*NsxtFirewallGroup, error) { + return getNsxtFirewallGroupById(org.client, id) +} + +// GetNsxtFirewallGroupById retrieves NSX-T Firewall Group by ID +func (vdc *Vdc) GetNsxtFirewallGroupById(id string) (*NsxtFirewallGroup, error) { + return getNsxtFirewallGroupById(vdc.client, id) +} + +// GetNsxtFirewallGroupById retrieves NSX-T Firewall Group by ID +func (egw *NsxtEdgeGateway) GetNsxtFirewallGroupById(id string) (*NsxtFirewallGroup, error) { + return getNsxtFirewallGroupById(egw.client, id) +} + +// Update allows users to update NSX-T Firewall Group +func (firewallGroup *NsxtFirewallGroup) Update(firewallGroupConfig *types.NsxtFirewallGroup) (*NsxtFirewallGroup, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointFirewallGroups + minimumApiVersion, err := firewallGroup.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if firewallGroupConfig.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T Firewall Group without ID") + } + + urlRef, err := firewallGroup.client.OpenApiBuildEndpoint(endpoint, firewallGroupConfig.ID) + if err != nil { + return nil, err + } + + returnObject := &NsxtFirewallGroup{ + NsxtFirewallGroup: &types.NsxtFirewallGroup{}, + client: firewallGroup.client, + } + + err = firewallGroup.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, firewallGroupConfig, returnObject.NsxtFirewallGroup, nil) + if err != nil { + return nil, fmt.Errorf("error updating NSX-T firewall group: %s", err) + } + + return returnObject, nil +} + +// Delete allows users to delete NSX-T Firewall Group +func (firewallGroup *NsxtFirewallGroup) Delete() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointFirewallGroups + minimumApiVersion, err := firewallGroup.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if firewallGroup.NsxtFirewallGroup.ID == "" { + return fmt.Errorf("cannot delete NSX-T Firewall Group without ID") + } + 
+ urlRef, err := firewallGroup.client.OpenApiBuildEndpoint(endpoint, firewallGroup.NsxtFirewallGroup.ID) + if err != nil { + return err + } + + err = firewallGroup.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting NSX-T Firewall Group: %s", err) + } + + return nil +} + +// GetAssociatedVms allows users to retrieve a list of references to child VMs (with vApps when they exist). +// +// Note. Only Security Groups have associated VMs. Executing it on an IP Set will return an error +// similar to: "only Security Groups have associated VMs. This Firewall Group has type 'IP_SET'" +func (firewallGroup *NsxtFirewallGroup) GetAssociatedVms() ([]*types.NsxtFirewallGroupMemberVms, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointFirewallGroups + minimumApiVersion, err := firewallGroup.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if firewallGroup.NsxtFirewallGroup.ID == "" { + return nil, fmt.Errorf("cannot retrieve associated VMs for NSX-T Firewall Group without ID") + } + + if !firewallGroup.IsSecurityGroup() { + return nil, fmt.Errorf("only Security Groups have associated VMs. This Firewall Group has type '%s'", + firewallGroup.NsxtFirewallGroup.Type) + } + + urlRef, err := firewallGroup.client.OpenApiBuildEndpoint(endpoint, firewallGroup.NsxtFirewallGroup.ID, "/associatedVMs") + if err != nil { + return nil, err + } + + associatedVms := []*types.NsxtFirewallGroupMemberVms{{}} + + err = firewallGroup.client.OpenApiGetAllItems(minimumApiVersion, urlRef, nil, &associatedVms, nil) + + if err != nil { + return nil, fmt.Errorf("error retrieving associated VMs: %s", err) + } + + return associatedVms, nil +} + +// IsSecurityGroup allows users to check if Firewall Group is a Security Group +func (firewallGroup *NsxtFirewallGroup) IsSecurityGroup() bool { + return firewallGroup.NsxtFirewallGroup.Type == types.FirewallGroupTypeSecurityGroup +} + +// IsIpSet allows users to check if Firewall Group is an IP Set +func (firewallGroup *NsxtFirewallGroup) IsIpSet() bool { + return firewallGroup.NsxtFirewallGroup.Type == types.FirewallGroupTypeIpSet +} + +func getNsxtFirewallGroupByName(client *Client, name string, queryParameters url.Values) (*NsxtFirewallGroup, error) { + queryParams := copyOrNewUrlValues(queryParameters) + queryParams = queryParameterFilterAnd("name=="+name, queryParams) + + allGroups, err := getAllNsxtFirewallGroups(client, queryParams) + if err != nil { + return nil, fmt.Errorf("could not find NSX-T Firewall Group with name '%s': %s", name, err) + } + + if len(allGroups) == 0 { + return nil, fmt.Errorf("%s: expected exactly one NSX-T Firewall Group with name '%s'. Got %d", ErrorEntityNotFound, name, len(allGroups)) + } + + if len(allGroups) > 1 { + return nil, fmt.Errorf("expected exactly one NSX-T Firewall Group with name '%s'. Got %d", name, len(allGroups)) + } + + // TODO API V36.0 - maybe it is fixed + // There is a bug that not all data is present (e.g. missing IpAddresses field for IP_SET) when + // using "getAll" endpoint therefore after finding the object by name we must retrieve it once + // again using its direct endpoint. 
+ // + // return allGroups[0], nil + + return getNsxtFirewallGroupById(client, allGroups[0].NsxtFirewallGroup.ID) +} + +func getNsxtFirewallGroupById(client *Client, id string) (*NsxtFirewallGroup, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointFirewallGroups + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty NSX-T Firewall Group ID specified") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + fwGroup := &NsxtFirewallGroup{ + NsxtFirewallGroup: &types.NsxtFirewallGroup{}, + client: client, + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, nil, fwGroup.NsxtFirewallGroup, nil) + if err != nil { + return nil, err + } + + return fwGroup, nil +} + +func getAllNsxtFirewallGroups(client *Client, queryParameters url.Values) ([]*NsxtFirewallGroup, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointFirewallGroups + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + // This Object does not follow regular REST scheme and for get the endpoint must be + // 1.0.0/firewallGroups/summaries therefore bellow "summaries" is appended to the path + urlRef, err := client.OpenApiBuildEndpoint(endpoint, "summaries") + if err != nil { + return nil, err + } + + typeResponses := []*types.NsxtFirewallGroup{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into NsxtEdgeGateway types with client + wrappedResponses := make([]*NsxtFirewallGroup, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtFirewallGroup{ + NsxtFirewallGroup: typeResponses[sliceIndex], + client: client, + } + } + + return wrappedResponses, nil +} + +func createNsxtFirewallGroup(client *Client, firewallGroupConfig *types.NsxtFirewallGroup) (*NsxtFirewallGroup, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointFirewallGroups + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnObject := &NsxtFirewallGroup{ + NsxtFirewallGroup: &types.NsxtFirewallGroup{}, + client: client, + } + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, firewallGroupConfig, returnObject.NsxtFirewallGroup, nil) + if err != nil { + return nil, fmt.Errorf("error creating NSX-T Firewall Group: %s", err) + } + + return returnObject, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_importable_switch.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_importable_switch.go new file mode 100644 index 000000000..893f27a40 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_importable_switch.go @@ -0,0 +1,155 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// NsxtImportableSwitch is a read only object to retrieve NSX-T segments (importable switches) to be used for Org VDC +// imported network. +type NsxtImportableSwitch struct { + NsxtImportableSwitch *types.NsxtImportableSwitch + client *Client +} + +// GetNsxtImportableSwitchByName retrieves a particular NSX-T Segment by name available for that VDC +// +// Note. OpenAPI endpoint does not exist for this resource and by default endpoint +// "/network/orgvdcnetworks/importableswitches" returns only unused NSX-T importable switches (the ones that are not +// already consumed in Org VDC networks) and there is no way to get them all (including the used ones). +func (vdc *Vdc) GetNsxtImportableSwitchByName(name string) (*NsxtImportableSwitch, error) { + if name == "" { + return nil, fmt.Errorf("empty NSX-T Importable Switch name specified") + } + + allNsxtImportableSwitches, err := vdc.GetAllNsxtImportableSwitches() + if err != nil { + return nil, fmt.Errorf("error getting all NSX-T Importable Switches for VDC '%s': %s", vdc.Vdc.Name, err) + } + + var filteredNsxtImportableSwitches []*NsxtImportableSwitch + for _, nsxtImportableSwitch := range allNsxtImportableSwitches { + if nsxtImportableSwitch.NsxtImportableSwitch.Name == name { + filteredNsxtImportableSwitches = append(filteredNsxtImportableSwitches, nsxtImportableSwitch) + } + } + + if len(filteredNsxtImportableSwitches) == 0 { + // ErrorEntityNotFound is injected here for the ability to validate problem using ContainsNotFound() + return nil, fmt.Errorf("%s: no NSX-T Importable Switch with name '%s' for Org VDC with ID '%s' found", + ErrorEntityNotFound, name, vdc.Vdc.ID) + } + + if len(filteredNsxtImportableSwitches) > 1 { + return nil, fmt.Errorf("more than one (%d) NSX-T Importable Switch with name '%s' for Org VDC with ID '%s' found", + len(filteredNsxtImportableSwitches), name, vdc.Vdc.ID) + } + + return filteredNsxtImportableSwitches[0], nil +} + +// GetAllNsxtImportableSwitches retrieves all available importable switches which can be consumed for creating NSX-T +// "Imported" Org VDC network +// +// Note. OpenAPI endpoint does not exist for this resource and by default endpoint +// "/network/orgvdcnetworks/importableswitches" returns only unused NSX-T importable switches (the ones that are not +// already consumed in Org VDC networks) and there is no way to get them all. +func (vdc *Vdc) GetAllNsxtImportableSwitches() ([]*NsxtImportableSwitch, error) { + if vdc.Vdc.ID == "" { + return nil, fmt.Errorf("VDC must have ID populated to retrieve NSX-T importable switches") + } + // request requires Org VDC ID to be specified as UUID, not as URN + orgVdcId, err := getBareEntityUuid(vdc.Vdc.ID) + if err != nil { + return nil, fmt.Errorf("could not get UUID from URN '%s': %s", vdc.Vdc.ID, err) + } + filter := map[string]string{"orgVdc": orgVdcId} + + return getFilteredNsxtImportableSwitches(filter, vdc.client) +} + +// GetFilteredNsxtImportableSwitches returns all available importable switches. +// One of the filters below is required (using plain UUID - not URN): +// * orgVdc +// * nsxTManager (only in VCD 10.3.0+) +// +// Note. 
OpenAPI endpoint does not exist for this resource and by default endpoint +// "/network/orgvdcnetworks/importableswitches" returns only unused NSX-T importable switches (the ones that are not +// already consumed in Org VDC networks) and there is no way to get them all. +func (vcdClient *VCDClient) GetFilteredNsxtImportableSwitches(filter map[string]string) ([]*NsxtImportableSwitch, error) { + return getFilteredNsxtImportableSwitches(filter, &vcdClient.Client) +} + +// GetFilteredNsxtImportableSwitchesByName builds on top of GetFilteredNsxtImportableSwitches and additionally performs +// client side filtering by Name +func (vcdClient *VCDClient) GetFilteredNsxtImportableSwitchesByName(filter map[string]string, name string) (*NsxtImportableSwitch, error) { + importableSwitches, err := getFilteredNsxtImportableSwitches(filter, &vcdClient.Client) + if err != nil { + return nil, fmt.Errorf("error getting list of filtered Importable Switches: %s", err) + } + + var foundImportableSwitch bool + var foundSwitches []*NsxtImportableSwitch + + for index, impSwitch := range importableSwitches { + if importableSwitches[index].NsxtImportableSwitch.Name == name { + foundImportableSwitch = true + foundSwitches = append(foundSwitches, impSwitch) + } + } + + if !foundImportableSwitch { + return nil, fmt.Errorf("%s: Importable Switch with name '%s' not found", ErrorEntityNotFound, name) + } + + if len(foundSwitches) > 1 { + return nil, fmt.Errorf("found multiple Importable Switches with name '%s'", name) + } + + return foundSwitches[0], nil +} + +// getFilteredNsxtImportableSwitches is extracted so that it can be reused across multiple functions +func getFilteredNsxtImportableSwitches(filter map[string]string, client *Client) ([]*NsxtImportableSwitch, error) { + apiEndpoint := client.VCDHREF + endpoint := apiEndpoint.Scheme + "://" + apiEndpoint.Host + "/network/orgvdcnetworks/importableswitches/" + // error below is ignored because it is a static endpoint + urlRef, err := url.Parse(endpoint) + if err != nil { + util.Logger.Printf("[DEBUG - getFilteredNsxtImportableSwitches] error parsing URL: %s", err) + } + + headAccept := http.Header{} + headAccept.Set("Accept", types.JSONMime) + request := client.newRequest(filter, nil, http.MethodGet, *urlRef, nil, client.APIVersion, headAccept) + request.Header.Set("Accept", types.JSONMime) + + response, err := checkResp(client.Http.Do(request)) + if err != nil { + return nil, err + } + defer response.Body.Close() + + var nsxtImportableSwitches []*types.NsxtImportableSwitch + if err = decodeBody(types.BodyTypeJSON, response, &nsxtImportableSwitches); err != nil { + return nil, err + } + + wrappedNsxtImportableSwitches := make([]*NsxtImportableSwitch, len(nsxtImportableSwitches)) + for sliceIndex := range nsxtImportableSwitches { + wrappedNsxtImportableSwitches[sliceIndex] = &NsxtImportableSwitch{ + NsxtImportableSwitch: nsxtImportableSwitches[sliceIndex], + client: client, + } + } + + return wrappedNsxtImportableSwitches, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_ipsec_vpn_tunnel.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_ipsec_vpn_tunnel.go new file mode 100644 index 000000000..d830f2d69 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_ipsec_vpn_tunnel.go @@ -0,0 +1,342 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// NsxtIpSecVpnTunnel offers site-to-site connectivity between an Edge Gateway and remote sites which also use NSX-T +// Data Center or which have either third-party hardware routers or VPN gateways that support IPsec. Policy-based IPsec +// VPN requires a VPN policy to be applied to packets to determine which traffic is to be protected by IPsec before +// being passed through a VPN tunnel. This type of VPN is considered static because when a local network topology and +// configuration change, the VPN policy settings must also be updated to accommodate the changes. NSX-T Data Center Edge +// Gateways support split tunnel configuration, with IPsec traffic taking routing precedence. VMware Cloud Director +// supports automatic route redistribution when you use IPsec VPN on an NSX-T edge gateway. +type NsxtIpSecVpnTunnel struct { + NsxtIpSecVpn *types.NsxtIpSecVpnTunnel + client *Client + // edgeGatewayId is stored here so that pointer receiver functions can embed edge gateway ID into path + edgeGatewayId string +} + +// GetAllIpSecVpnTunnels returns all IPsec VPN Tunnel configurations +func (egw *NsxtEdgeGateway) GetAllIpSecVpnTunnels(queryParameters url.Values) ([]*NsxtIpSecVpnTunnel, error) { + client := egw.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnel + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, egw.EdgeGateway.ID)) + if err != nil { + return nil, err + } + + typeResponses := []*types.NsxtIpSecVpnTunnel{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into NsxtIpSecVpnTunnel types with client + wrappedResponses := make([]*NsxtIpSecVpnTunnel, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtIpSecVpnTunnel{ + NsxtIpSecVpn: typeResponses[sliceIndex], + client: client, + edgeGatewayId: egw.EdgeGateway.ID, + } + } + + return wrappedResponses, nil +} + +//GetIpSecVpnTunnelById retrieves single IPsec VPN Tunnel by ID +func (egw *NsxtEdgeGateway) GetIpSecVpnTunnelById(id string) (*NsxtIpSecVpnTunnel, error) { + if id == "" { + return nil, fmt.Errorf("canot find NSX-T IPsec VPN Tunnel configuration without ID") + } + + client := egw.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnel + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, egw.EdgeGateway.ID), id) + if err != nil { + return nil, err + } + + returnObject := &NsxtIpSecVpnTunnel{ + NsxtIpSecVpn: &types.NsxtIpSecVpnTunnel{}, + client: client, + edgeGatewayId: egw.EdgeGateway.ID, + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, nil, returnObject.NsxtIpSecVpn, nil) + if err != nil { + return nil, err + } + + return returnObject, nil +} + +// GetIpSecVpnTunnelByName retrieves single IPsec VPN Tunnel by Name. +// +// Note. Name uniqueness is not enforced therefore it might exist a few IPsec VPN Tunnels with the same name. +// An error will be returned in that case. 
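Since VCD does not enforce name uniqueness for IPsec VPN tunnels, callers of `GetIpSecVpnTunnelByName` should treat "not found" and "ambiguous name" as distinct outcomes. The sketch below illustrates that pattern using `ContainsNotFound()` as suggested in this library's own comments; it assumes an already authenticated `*govcd.NsxtEdgeGateway` (obtaining it is out of scope here) and the helper name `findIpSecTunnel` is purely illustrative.

```go
package examples

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// findIpSecTunnel is an illustrative helper. It assumes 'egw' is an already
// retrieved NSX-T Edge Gateway and distinguishes "not found" from other errors
// (including the case where several tunnels share the same name).
func findIpSecTunnel(egw *govcd.NsxtEdgeGateway, name string) (*govcd.NsxtIpSecVpnTunnel, error) {
	tunnel, err := egw.GetIpSecVpnTunnelByName(name)
	if govcd.ContainsNotFound(err) {
		return nil, fmt.Errorf("no IPsec VPN tunnel named '%s' on this edge gateway", name)
	}
	if err != nil {
		// GetIpSecVpnTunnelByName also errors out when the name is ambiguous
		return nil, fmt.Errorf("could not look up IPsec VPN tunnel '%s': %s", name, err)
	}
	// The object returned by name lookup was re-read by ID, so it includes the pre-shared key
	return tunnel, nil
}
```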
+func (egw *NsxtEdgeGateway) GetIpSecVpnTunnelByName(name string) (*NsxtIpSecVpnTunnel, error) {
+    if name == "" {
+        return nil, fmt.Errorf("cannot find NSX-T IPsec VPN Tunnel configuration without Name")
+    }
+
+    allVpns, err := egw.GetAllIpSecVpnTunnels(nil)
+    if err != nil {
+        return nil, fmt.Errorf("error retrieving all NSX-T IPsec VPN Tunnel configurations: %s", err)
+    }
+
+    var allResults []*NsxtIpSecVpnTunnel
+
+    for _, vpnConfig := range allVpns {
+        if vpnConfig.NsxtIpSecVpn.Name == name {
+            allResults = append(allResults, vpnConfig)
+        }
+    }
+
+    if len(allResults) > 1 {
+        return nil, fmt.Errorf("error - found %d NSX-T IPsec VPN Tunnel configurations with Name '%s'. Expected 1",
+            len(allResults), name)
+    }
+
+    if len(allResults) == 0 {
+        return nil, ErrorEntityNotFound
+    }
+
+    // Retrieving again the object by ID, because only it includes Pre-shared Key
+    return egw.GetIpSecVpnTunnelById(allResults[0].NsxtIpSecVpn.ID)
+}
+
+// CreateIpSecVpnTunnel creates IPsec VPN Tunnel and returns it
+func (egw *NsxtEdgeGateway) CreateIpSecVpnTunnel(ipSecVpnConfig *types.NsxtIpSecVpnTunnel) (*NsxtIpSecVpnTunnel, error) {
+    client := egw.client
+    endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnel
+    minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint)
+    if err != nil {
+        return nil, err
+    }
+
+    urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, egw.EdgeGateway.ID))
+    if err != nil {
+        return nil, err
+    }
+
+    task, err := client.OpenApiPostItemAsync(minimumApiVersion, urlRef, nil, ipSecVpnConfig)
+    if err != nil {
+        return nil, fmt.Errorf("error creating NSX-T IPsec VPN Tunnel configuration: %s", err)
+    }
+
+    err = task.WaitTaskCompletion()
+    if err != nil {
+        return nil, fmt.Errorf("task failed while creating NSX-T IPsec VPN Tunnel configuration: %s", err)
+    }
+
+    // filtering even by Name is not supported on VCD side
+    allVpns, err := egw.GetAllIpSecVpnTunnels(nil)
+    if err != nil {
+        return nil, fmt.Errorf("error retrieving all NSX-T IPsec VPN Tunnel configuration after creation: %s", err)
+    }
+
+    for index, singleConfig := range allVpns {
+        if singleConfig.IsEqualTo(ipSecVpnConfig) {
+            // retrieve exact value by ID, because only this endpoint includes private key
+            ipSecVpn, err := egw.GetIpSecVpnTunnelById(allVpns[index].NsxtIpSecVpn.ID)
+            if err != nil {
+                return nil, fmt.Errorf("error retrieving NSX-T IPsec VPN Tunnel configuration: %s", err)
+            }
+
+            return ipSecVpn, nil
+        }
+    }
+
+    return nil, fmt.Errorf("error finding NSX-T IPsec VPN Tunnel configuration after creation: %s", ErrorEntityNotFound)
+}
+
+// Update updates NSX-T IPsec VPN Tunnel configuration with newly supplied data.
+func (ipSecVpn *NsxtIpSecVpnTunnel) Update(ipSecVpnConfig *types.NsxtIpSecVpnTunnel) (*NsxtIpSecVpnTunnel, error) { + client := ipSecVpn.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnel + apiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if ipSecVpn.NsxtIpSecVpn.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T IPsec VPN Tunnel configuration without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, ipSecVpn.edgeGatewayId), ipSecVpn.NsxtIpSecVpn.ID) + if err != nil { + return nil, err + } + + returnObject := &NsxtIpSecVpnTunnel{ + NsxtIpSecVpn: &types.NsxtIpSecVpnTunnel{}, + client: client, + edgeGatewayId: ipSecVpn.edgeGatewayId, + } + + err = client.OpenApiPutItem(apiVersion, urlRef, nil, ipSecVpnConfig, returnObject.NsxtIpSecVpn, nil) + if err != nil { + return nil, fmt.Errorf("error updating NSX-T IPsec VPN Tunnel configuration: %s", err) + } + + return returnObject, nil +} + +// Delete allows users to delete NSX-T IPsec VPN Tunnel +func (ipSecVpn *NsxtIpSecVpnTunnel) Delete() error { + client := ipSecVpn.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnel + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if ipSecVpn.NsxtIpSecVpn.ID == "" { + return fmt.Errorf("cannot delete NSX-T IPsec VPN Tunnel configuration without ID") + } + + urlRef, err := ipSecVpn.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, ipSecVpn.edgeGatewayId), ipSecVpn.NsxtIpSecVpn.ID) + if err != nil { + return err + } + + err = ipSecVpn.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + if err != nil { + return fmt.Errorf("error deleting NSX-T IPsec VPN Tunnel configuration: %s", err) + } + + return nil +} + +// GetStatus returns status of IPsec VPN Tunnel. +// +// Note. This is not being immediately populated and may appear after some time depending on +// NsxtIpSecVpnTunnelSecurityProfile.DpdConfiguration +func (ipSecVpn *NsxtIpSecVpnTunnel) GetStatus() (*types.NsxtIpSecVpnTunnelStatus, error) { + client := ipSecVpn.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnelStatus + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if ipSecVpn.NsxtIpSecVpn.ID == "" { + return nil, fmt.Errorf("cannot get NSX-T IPsec VPN Tunnel status without ID") + } + + urlRef, err := ipSecVpn.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, ipSecVpn.edgeGatewayId, ipSecVpn.NsxtIpSecVpn.ID)) + if err != nil { + return nil, err + } + + ipSecVpnTunnelStatus := &types.NsxtIpSecVpnTunnelStatus{} + + err = ipSecVpn.client.OpenApiGetItem(minimumApiVersion, urlRef, nil, ipSecVpnTunnelStatus, nil) + if err != nil { + return nil, fmt.Errorf("error getting NSX-T IPsec VPN Tunnel status: %s", err) + } + + return ipSecVpnTunnelStatus, nil +} + +// UpdateTunnelConnectionProperties allows user to customize IPsec VPN Tunnel Security Profile when the default one +// does not fit requirements. 
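The `GetStatus` and `Delete` methods defined above can be combined into a small teardown routine. This is a minimal sketch, assuming the tunnel object was obtained earlier (for example via `GetIpSecVpnTunnelByName`); the function name is illustrative.

```go
package examples

import (
	"log"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// reportAndRemoveTunnel is an illustrative helper: it assumes 'tunnel' was
// previously obtained, e.g. via GetIpSecVpnTunnelByName or CreateIpSecVpnTunnel.
func reportAndRemoveTunnel(tunnel *govcd.NsxtIpSecVpnTunnel) error {
	// Status may be empty shortly after creation; it is populated asynchronously
	// depending on the DPD configuration of the tunnel's security profile.
	status, err := tunnel.GetStatus()
	if err != nil {
		return err
	}
	log.Printf("IPsec VPN tunnel status: %+v", status)

	// Delete removes the tunnel configuration from the edge gateway
	return tunnel.Delete()
}
```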
+func (ipSecVpn *NsxtIpSecVpnTunnel) UpdateTunnelConnectionProperties(ipSecVpnTunnelConnectionProperties *types.NsxtIpSecVpnTunnelSecurityProfile) (*types.NsxtIpSecVpnTunnelSecurityProfile, error) { + client := ipSecVpn.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnelConnectionProperties + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if ipSecVpn.NsxtIpSecVpn.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T IPsec VPN Connection Properties without ID") + } + + urlRef, err := ipSecVpn.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, ipSecVpn.edgeGatewayId, ipSecVpn.NsxtIpSecVpn.ID)) + if err != nil { + return nil, err + } + + ipSecVpnTunnelProfile := &types.NsxtIpSecVpnTunnelSecurityProfile{} + err = ipSecVpn.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, ipSecVpnTunnelConnectionProperties, ipSecVpnTunnelProfile, nil) + if err != nil { + return nil, fmt.Errorf("error updating NSX-T IPsec VPN Connection Properties: %s", err) + } + + return ipSecVpnTunnelProfile, nil +} + +// GetTunnelConnectionProperties retrieves IPsec VPN Tunnel Security Profile +func (ipSecVpn *NsxtIpSecVpnTunnel) GetTunnelConnectionProperties() (*types.NsxtIpSecVpnTunnelSecurityProfile, error) { + client := ipSecVpn.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnelConnectionProperties + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if ipSecVpn.NsxtIpSecVpn.ID == "" { + return nil, fmt.Errorf("cannot get NSX-T IPsec VPN Connection Properties without ID") + } + + urlRef, err := ipSecVpn.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, ipSecVpn.edgeGatewayId, ipSecVpn.NsxtIpSecVpn.ID)) + if err != nil { + return nil, err + } + + ipSecVpnTunnelProfile := &types.NsxtIpSecVpnTunnelSecurityProfile{} + err = ipSecVpn.client.OpenApiGetItem(minimumApiVersion, urlRef, nil, ipSecVpnTunnelProfile, nil) + if err != nil { + return nil, fmt.Errorf("error retrieving NSX-T IPsec VPN Connection Properties: %s", err) + } + + return ipSecVpnTunnelProfile, nil +} + +// IsEqualTo helps to find NSX-T IPsec VPN Tunnel Configuration +// Combination of LocalAddress and RemoteAddress has to be unique (enforced by API). This is a list of fields compared: +// * LocalEndpoint.LocalAddress +// * RemoteEndpoint.RemoteAddress +func (ipSecVpn *NsxtIpSecVpnTunnel) IsEqualTo(vpnConfig *types.NsxtIpSecVpnTunnel) bool { + return ipSetVpnRulesEqual(ipSecVpn.NsxtIpSecVpn, vpnConfig) +} + +// ipSetVpnRulesEqual performs comparison of two NSX-T IPsec VPN Tunnels to ease lookup. 
This is a list of fields compared: +// * LocalEndpoint.LocalAddress +// * RemoteEndpoint.RemoteAddress +func ipSetVpnRulesEqual(first, second *types.NsxtIpSecVpnTunnel) bool { + util.Logger.Println("comparing NSX-T IP Sev VPN configuration:") + util.Logger.Printf("%+v\n", first) + util.Logger.Println("against:") + util.Logger.Printf("%+v\n", second) + + // These fields should be enough to cover uniqueness + if first.LocalEndpoint.LocalAddress == second.LocalEndpoint.LocalAddress && + first.RemoteEndpoint.RemoteAddress == second.RemoteEndpoint.RemoteAddress { + return true + } + + return false +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_nat_rule.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_nat_rule.go new file mode 100644 index 000000000..e0f5c00ad --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_nat_rule.go @@ -0,0 +1,294 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// NsxtNatRule describes a single NAT rule of 5 different Rule Types - DNAT`, `NO_DNAT`, `SNAT`, `NO_SNAT`, 'REFLEXIVE' +// 'REFLEXIVE' is only supported in API 35.2 (VCD 10.2.2+) +// +// A SNAT or a DNAT rule on an Edge Gateway in the VMware Cloud Director environment is always configured from the +// perspective of your organization VDC. +// DNAT and NO_DNAT - outside traffic going inside +// SNAT and NO_SNAT - inside traffic going outside +// More docs in https://docs.vmware.com/en/VMware-Cloud-Director/10.2/VMware-Cloud-Director-Tenant-Portal-Guide/GUID-9E43E3DC-C028-47B3-B7CA-59F0ED40E0A6.html +// +// Note. This structure and all its API calls will require at least API version 34.0, but will elevate it to 35.2 if +// possible because API 35.2 introduces support for 2 new fields FirewallMatch and Priority. +type NsxtNatRule struct { + NsxtNatRule *types.NsxtNatRule + client *Client + // edgeGatewayId is stored here so that pointer receiver functions can embed edge gateway ID into path + edgeGatewayId string +} + +// GetAllNatRules retrieves all NAT rules with an optional queryParameters filter. +func (egw *NsxtEdgeGateway) GetAllNatRules(queryParameters url.Values) ([]*NsxtNatRule, error) { + client := egw.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtNatRules + apiVersion, err := client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, egw.EdgeGateway.ID)) + if err != nil { + return nil, err + } + + typeResponses := []*types.NsxtNatRule{{}} + err = client.OpenApiGetAllItems(apiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into NsxtNatRule types with client + wrappedResponses := make([]*NsxtNatRule, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &NsxtNatRule{ + NsxtNatRule: typeResponses[sliceIndex], + client: client, + edgeGatewayId: egw.EdgeGateway.ID, + } + } + + return wrappedResponses, nil +} + +// GetNatRuleByName finds a NAT rule by Name and returns it +// +// Note. 
API does not enforce name uniqueness therefore an error will be thrown if two rules with the same name exist +func (egw *NsxtEdgeGateway) GetNatRuleByName(name string) (*NsxtNatRule, error) { + // Ideally this function would use OpenAPI filters to perform server side filtering, but this endpoint does not + // support any filters - even ID. Therefore one must retrieve all items and look if there is an item with the same ID + allNatRules, err := egw.GetAllNatRules(nil) + if err != nil { + return nil, fmt.Errorf("error retriving all NSX-T NAT rules: %s", err) + } + + var allResults []*NsxtNatRule + + for _, natRule := range allNatRules { + if natRule.NsxtNatRule.Name == name { + allResults = append(allResults, natRule) + } + } + + if len(allResults) > 1 { + return nil, fmt.Errorf("error - found %d NSX-T NAT rules with name '%s'. Expected 1", len(allResults), name) + } + + if len(allResults) == 0 { + return nil, ErrorEntityNotFound + } + + return allResults[0], nil +} + +// GetNatRuleById finds a NAT rule by ID and returns it +func (egw *NsxtEdgeGateway) GetNatRuleById(id string) (*NsxtNatRule, error) { + // Ideally this function would use OpenAPI filters to perform server side filtering, but this endpoint does not + // support any filters - even ID. Therefore one must retrieve all items and look if there is an item with the same ID + allNatRules, err := egw.GetAllNatRules(nil) + if err != nil { + return nil, fmt.Errorf("error retriving all NSX-T NAT rules: %s", err) + } + + for _, natRule := range allNatRules { + if natRule.NsxtNatRule.ID == id { + return natRule, nil + } + } + + return nil, ErrorEntityNotFound +} + +// CreateNatRule creates a NAT rule and returns it. +// +// Note. API has a limitation, that it does not return ID for created rule. To work around it this function creates +// a NAT rule, fetches all rules and finds a rule with exactly the same field values and returns it (including ID) +// There is still a slight risk to retrieve wrong ID if exactly the same rule already exists. +func (egw *NsxtEdgeGateway) CreateNatRule(natRuleConfig *types.NsxtNatRule) (*NsxtNatRule, error) { + client := egw.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtNatRules + apiVersion, err := client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return nil, err + } + + // Insert Edge Gateway ID into endpoint path edgeGateways/%s/nat/rules + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, egw.EdgeGateway.ID)) + if err != nil { + return nil, err + } + + // Creating NAT rule must follow different way than usual OpenAPI one because this item has an API bug and + // NAT rule ID is not returned after this object is created. The only way to find its ID afterwards is to GET all + // items, and manually match it based on rule name, etc. 
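Because the NAT endpoint does not return the ID of a newly created rule, `CreateNatRule` re-reads all rules and matches the new one by its field values, so a caller can reduce the (small) risk of matching a pre-existing rule by giving the rule a distinctive name and address pair. A hedged sketch, assuming an already retrieved `*govcd.NsxtEdgeGateway`; the addresses and rule name are placeholders.

```go
package examples

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
	"github.com/vmware/go-vcloud-director/v2/types/v56"
)

// createDnatRule is an illustrative sketch. The addresses are placeholders and
// 'egw' is assumed to be an already retrieved NSX-T Edge Gateway. Because the
// create call matches the new rule against the full rule list, the Name and
// address pair should be distinctive to avoid matching a pre-existing rule.
func createDnatRule(egw *govcd.NsxtEdgeGateway) (*govcd.NsxtNatRule, error) {
	ruleConfig := &types.NsxtNatRule{
		Name:              "example-dnat",
		Description:       "example rule created for illustration",
		Enabled:           true,
		RuleType:          "DNAT",
		ExternalAddresses: "192.0.2.10",
		InternalAddresses: "10.0.0.10",
	}

	rule, err := egw.CreateNatRule(ruleConfig)
	if err != nil {
		return nil, fmt.Errorf("error creating DNAT rule: %s", err)
	}
	// The returned object carries the ID resolved by the post-create lookup
	return rule, nil
}
```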
+ task, err := client.OpenApiPostItemAsync(apiVersion, urlRef, nil, natRuleConfig) + if err != nil { + return nil, fmt.Errorf("error creating NSX-T NAT rule: %s", err) + } + + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("task failed while creating NSX-T NAT rule: %s", err) + } + + // queryParameters (API side filtering) are not used because pretty much nothing is accepted as filter (such fields as + // name, description, ruleType and even ID are not allowed + allNatRules, err := egw.GetAllNatRules(nil) + if err != nil { + return nil, fmt.Errorf("error fetching all NAT rules: %s", err) + } + + for index, singleRule := range allNatRules { + // Look for a matching rule + if singleRule.IsEqualTo(natRuleConfig) { + return allNatRules[index], nil + + } + } + return nil, fmt.Errorf("rule '%s' of type '%s' not found after creation", natRuleConfig.Name, natRuleConfig.RuleType) +} + +// Update allows users to update NSX-T NAT rule +func (nsxtNat *NsxtNatRule) Update(natRuleConfig *types.NsxtNatRule) (*NsxtNatRule, error) { + client := nsxtNat.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtNatRules + apiVersion, err := client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return nil, err + } + + if nsxtNat.NsxtNatRule.ID == "" { + return nil, fmt.Errorf("cannot update NSX-T NAT Rule without ID") + } + + urlRef, err := nsxtNat.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, nsxtNat.edgeGatewayId), nsxtNat.NsxtNatRule.ID) + if err != nil { + return nil, err + } + + returnObject := &NsxtNatRule{ + NsxtNatRule: &types.NsxtNatRule{}, + client: client, + edgeGatewayId: nsxtNat.edgeGatewayId, + } + + err = client.OpenApiPutItem(apiVersion, urlRef, nil, natRuleConfig, returnObject.NsxtNatRule, nil) + if err != nil { + return nil, fmt.Errorf("error updating NSX-T NAT Rule: %s", err) + } + + return returnObject, nil +} + +// Delete deletes NSX-T NAT rule +func (nsxtNat *NsxtNatRule) Delete() error { + client := nsxtNat.client + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtNatRules + apiVersion, err := client.getOpenApiHighestElevatedVersion(endpoint) + if err != nil { + return err + } + + if nsxtNat.NsxtNatRule.ID == "" { + return fmt.Errorf("cannot delete NSX-T NAT rule without ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, nsxtNat.edgeGatewayId), nsxtNat.NsxtNatRule.ID) + if err != nil { + return err + } + + err = client.OpenApiDeleteItem(apiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting NSX-T NAT Rule: %s", err) + } + + return nil +} + +// IsEqualTo allows to check if a rule has exactly the same fields (except ID) to the supplied rule +// This validation is very tricky because minor version changes impact how fields are return. +// This function relies on most common and stable fields: +// * Name +// * Enabled +// * Description +// * ExternalAddresses +// * InternalAddresses +// * ApplicationPortProfile.ID +func (nsxtNat *NsxtNatRule) IsEqualTo(rule *types.NsxtNatRule) bool { + return natRulesEqual(nsxtNat.NsxtNatRule, rule) +} + +// natRulesEqual is a helper to check if first and second supplied rules are exactly the same (except ID) +func natRulesEqual(first, second *types.NsxtNatRule) bool { + util.Logger.Println("comparing NAT rule:") + util.Logger.Printf("%+v\n", first) + util.Logger.Println("against:") + util.Logger.Printf("%+v\n", second) + + // Being an org user always returns logging as false - therefore cannot compare it. 
+ // first.Logging == second.Logging + + // These fields are returned or not returned depending on version and it is impossible to be 100% sure a minor + // patch does not break such comparison + // DnatExternalPort + // SnatDestinationAddresses + // RuleType - would work up to 35.2+, but then there is another field Type + // Type only available since 35.2+. Must be explicitly used for REFLEXIVE type in API v36.0+ + // FirewallMatch - it exists only since API 35.2+ and has a default starting this version + // InternalPort - is deprecated since API V35.0+ and is replaced by DnatExternalPort + // Priority - is available only in API V35.2+ + // Version - it is something that is automatically handled by API. When creating - you must specify none, but it sets + // version to 0. When updating one must specify the last version read, and again it will automatically increment this + // value after update. (probably it is meant to avoid concurrent updates) + if first.Name == second.Name && + first.Enabled == second.Enabled && + first.Description == second.Description && + first.ExternalAddresses == second.ExternalAddresses && + first.InternalAddresses == second.InternalAddresses && + + // Match both application profiles being nil (types cannot be equal as they are pointers, not values) + ((first.ApplicationPortProfile == nil && second.ApplicationPortProfile == nil) || + // Or both being not nil and having the same IDs + (first.ApplicationPortProfile != nil && second.ApplicationPortProfile != nil && first.ApplicationPortProfile.ID == second.ApplicationPortProfile.ID) || + // Or first Application profile is nil and second is not nil, but has empty ID + (first.ApplicationPortProfile == nil && second.ApplicationPortProfile != nil && second.ApplicationPortProfile.ID == "") || + // Or first Application Profile is not nil, but has empty ID, while second application port profile is nil + (first.ApplicationPortProfile != nil && first.ApplicationPortProfile.ID == "" && second.ApplicationPortProfile == nil)) { + + return true + } + + return false +} + +// elevateNsxtNatRuleApiVersion helps to elevate API version to consume newer NSX-T NAT Rule features +// API V35.2+ support new fields FirewallMatch and Priority +// API V36.0+ supports new RuleType - REFLEXIVE +//func elevateNsxtNatRuleApiVersion(apiVersion string, client *Client) string { +// +// // Fields FirewallMatch and Priority require API version 35.2 to be set therefore version is elevated if API supports +// if client.APIVCDMaxVersionIs(">= 35.2") { +// apiVersion = "35.2" +// } +// +// // RuleType REFLEXIVE requires API V36.0 +// if client.APIVCDMaxVersionIs(">= 36.0") { +// apiVersion = "36.0" +// } +// +// return apiVersion +//} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_tier0_router.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_tier0_router.go new file mode 100644 index 000000000..971229b90 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxt_tier0_router.go @@ -0,0 +1,138 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// NsxtTier0Router +type NsxtTier0Router struct { + NsxtTier0Router *types.NsxtTier0Router + client *Client +} + +// GetImportableNsxtTier0RouterByName retrieves NSX-T tier 0 router by given parent NSX-T manager ID and Tier 0 router +// name +// +// Warning. The API returns only unused Tier-0 routers (the ones that are not used in external networks yet) +// +// Note. NSX-T manager ID is mandatory and must be in URN format (e.g. +// urn:vcloud:nsxtmanager:09722307-aee0-4623-af95-7f8e577c9ebc) + +func (vcdClient *VCDClient) GetImportableNsxtTier0RouterByName(name, nsxtManagerId string) (*NsxtTier0Router, error) { + if nsxtManagerId == "" { + return nil, fmt.Errorf("no NSX-T manager ID specified") + } + + if !isUrn(nsxtManagerId) { + return nil, fmt.Errorf("NSX-T manager ID is not URN (e.g. 'urn:vcloud:nsxtmanager:09722307-aee0-4623-af95-7f8e577c9ebc)', got: %s", nsxtManagerId) + } + + if name == "" { + return nil, fmt.Errorf("empty Tier 0 router name specified") + } + + // Ideally FIQL filter could be used to filter on server side and get only desired result, but filtering on + // 'displayName' is not yet supported. The only supported field for filtering is + // _context==urn:vcloud:nsxtmanager:09722307-aee0-4623-af95-7f8e577c9ebc to specify parent NSX-T manager (This + // automatically happens in GetAllImportableNsxtTier0Routers()). The below filter injection is left as documentation. + /* + queryParameters := copyOrNewUrlValues(nil) + queryParameters.Add("filter", "displayName=="+name) + */ + + nsxtTier0Routers, err := vcdClient.GetAllImportableNsxtTier0Routers(nsxtManagerId, nil) + if err != nil { + return nil, fmt.Errorf("could not find NSX-T Tier-0 router with name '%s' for NSX-T manager with id '%s': %s", + name, nsxtManagerId, err) + } + + // TODO remove this when FIQL supports filtering on 'displayName' + nsxtTier0Routers = filterNsxtTier0RoutersInExternalNetworks(name, nsxtTier0Routers) + // EOF TODO remove this when FIQL supports filtering on 'displayName' + + if len(nsxtTier0Routers) == 0 { + // ErrorEntityNotFound is injected here for the ability to validate problem using ContainsNotFound() + return nil, fmt.Errorf("%s: no NSX-T Tier-0 router with name '%s' for NSX-T manager with id '%s' found", + ErrorEntityNotFound, name, nsxtManagerId) + } + + if len(nsxtTier0Routers) > 1 { + return nil, fmt.Errorf("more than one (%d) NSX-T Tier-0 router with name '%s' for NSX-T manager with id '%s' found", + len(nsxtTier0Routers), name, nsxtManagerId) + } + + return nsxtTier0Routers[0], nil +} + +// filterNsxtTier0RoutersInExternalNetworks is created as a fix for local filtering instead of using +// FIQL filter (because it does not support it). +func filterNsxtTier0RoutersInExternalNetworks(name string, allNnsxtTier0Routers []*NsxtTier0Router) []*NsxtTier0Router { + filteredNsxtTier0Routers := make([]*NsxtTier0Router, 0) + for index, nsxtTier0Router := range allNnsxtTier0Routers { + if allNnsxtTier0Routers[index].NsxtTier0Router.DisplayName == name { + filteredNsxtTier0Routers = append(filteredNsxtTier0Routers, nsxtTier0Router) + } + } + + return filteredNsxtTier0Routers + +} + +// GetAllImportableNsxtTier0Routers retrieves all NSX-T Tier-0 routers using OpenAPI endpoint. Query parameters can be +// supplied to perform additional filtering. By default it injects FIQL filter _context==nsxtManagerId (e.g. 
+// _context==urn:vcloud:nsxtmanager:09722307-aee0-4623-af95-7f8e577c9ebc) because it is mandatory to list child Tier-0 +// routers. +// +// Warning. The API returns only unused Tier-0 routers (the ones that are not used in external networks yet) +// +// Note. IDs of Tier-0 routers do not have a standard and may look as strings when they are created using UI or as UUIDs +// when they are created using API +func (vcdClient *VCDClient) GetAllImportableNsxtTier0Routers(nsxtManagerId string, queryParameters url.Values) ([]*NsxtTier0Router, error) { + if !isUrn(nsxtManagerId) { + return nil, fmt.Errorf("NSX-T manager ID is not URN (e.g. 'urn:vcloud:nsxtmanager:09722307-aee0-4623-af95-7f8e577c9ebc)', got: %s", nsxtManagerId) + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointImportableTier0Routers + minimumApiVersion, err := vcdClient.Client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := vcdClient.Client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + // Get all Tier-0 routers that are accessible to an organization VDC. Routers that are already associated with an + // External Network are filtered out. The “_context” filter key must be set with the id of the NSX-T manager for which + // we want to get the Tier-0 routers for. + // + // _context==urn:vcloud:nsxtmanager:09722307-aee0-4623-af95-7f8e577c9ebc + + // Create a copy of queryParameters so that original queryParameters are not mutated (because a map is always a + // reference) + queryParams := queryParameterFilterAnd("_context=="+nsxtManagerId, queryParameters) + + typeResponses := []*types.NsxtTier0Router{{}} + err = vcdClient.Client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParams, &typeResponses, nil) + if err != nil { + return nil, err + } + + returnObjects := make([]*NsxtTier0Router, len(typeResponses)) + for sliceIndex := range typeResponses { + returnObjects[sliceIndex] = &NsxtTier0Router{ + NsxtTier0Router: typeResponses[sliceIndex], + client: &vcdClient.Client, + } + } + + return returnObjects, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_dhcplease.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_dhcplease.go new file mode 100644 index 000000000..09dfb5518 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_dhcplease.go @@ -0,0 +1,69 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "encoding/xml" + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type responseEdgeDhcpLeases struct { + XMLName xml.Name `xml:"dhcpLeases"` + TimeStamp string `xml:"timeStamp"` + DhcpLease types.EdgeDhcpLease `xml:"dhcpLeaseInfo"` +} + +// GetNsxvActiveDhcpLeaseByMac finds active DHCP lease for a given hardware address (MAC) +func (egw *EdgeGateway) GetNsxvActiveDhcpLeaseByMac(mac string) (*types.EdgeDhcpLeaseInfo, error) { + if mac == "" { + return nil, fmt.Errorf("MAC address must be provided to lookup DHCP lease") + } + dhcpLeases, err := egw.GetAllNsxvDhcpLeases() + if err != nil { + return nil, err + } + + util.Logger.Printf("[DEBUG] Looking up active DHCP lease for MAC: %s", mac) + for _, lease := range dhcpLeases { + util.Logger.Printf("[DEBUG] Checking DHCP lease: %#+v", lease) + if lease.BindingState == "active" && lease.MacAddress == mac { + return lease, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetAllNsxvDhcpLeases retrieves all DHCP leases defined in NSX-V edge gateway +func (egw *EdgeGateway) GetAllNsxvDhcpLeases() ([]*types.EdgeDhcpLeaseInfo, error) { + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("only advanced edge gateways support DHCP") + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeDhcpLeasePath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + dhcpLeases := &responseEdgeDhcpLeases{} + + // This query returns all DHCP leases + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read DHCP leases: %s", nil, dhcpLeases) + if err != nil { + return nil, err + } + + if dhcpLeases != nil && len(dhcpLeases.DhcpLease.DhcpLeaseInfos) == 0 { + util.Logger.Printf("[DEBUG] GetAllNsxvDhcpLeases found 0 leases available") + return nil, ErrorEntityNotFound + } + + return dhcpLeases.DhcpLease.DhcpLeaseInfos, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_dhcprelay.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_dhcprelay.go new file mode 100644 index 000000000..05ceb87de --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_dhcprelay.go @@ -0,0 +1,77 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// UpdateDhcpRelay updates DHCP relay settings for a particular edge gateway and returns them. The +// feature itself enables you to leverage your existing DHCP infrastructure from within NSX without +// any interruption to the IP address management in your environment. DHCP messages are relayed from +// virtual machine(s) to the designated DHCP server(s) in the physical world. This enables IP +// addresses within NSX to continue to be in sync with IP addresses in other environments. 
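The DHCP lease lookup above returns `ErrorEntityNotFound` when no active lease matches the MAC address, so callers can distinguish "no lease yet" from a real failure. A minimal sketch, assuming an already retrieved advanced NSX-V `*govcd.EdgeGateway` and a placeholder MAC address:

```go
package examples

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// printActiveLease is an illustrative helper around the lease lookup shown
// above. 'egw' is assumed to be an already retrieved NSX-V (advanced) edge
// gateway and the MAC address is a placeholder.
func printActiveLease(egw *govcd.EdgeGateway) error {
	lease, err := egw.GetNsxvActiveDhcpLeaseByMac("00:50:56:01:02:03")
	if govcd.ContainsNotFound(err) {
		fmt.Println("no active DHCP lease for this MAC address")
		return nil
	}
	if err != nil {
		return err
	}
	// EdgeDhcpLeaseInfo fields (binding state, MAC, etc.) are printed as-is
	fmt.Printf("active lease: %+v\n", lease)
	return nil
}
```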
+func (egw *EdgeGateway) UpdateDhcpRelay(dhcpRelayConfig *types.EdgeDhcpRelay) (*types.EdgeDhcpRelay, error) { + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("only advanced edge gateways support DHCP relay") + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeDhcpRelayPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + // We expect to get http.StatusNoContent or if not an error of type types.NSXError + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error setting DHCP relay settings: %s", dhcpRelayConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + return egw.GetDhcpRelay() +} + +// GetDhcpRelay retrieves a structure of *types.EdgeDhcpRelay with all DHCP relay settings present +// on a particular edge gateway. +func (egw *EdgeGateway) GetDhcpRelay() (*types.EdgeDhcpRelay, error) { + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("only advanced edge gateways support DHCP relay") + } + response := &types.EdgeDhcpRelay{} + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeDhcpRelayPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // This query Edge gateway DHCP relay using proxied NSX-V API + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read edge gateway DHCP relay configuration: %s", nil, response) + if err != nil { + return nil, err + } + + return response, nil +} + +// ResetDhcpRelay removes all configuration by sending a DELETE request for DHCP relay configuration +// endpoint +func (egw *EdgeGateway) ResetDhcpRelay() error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support DHCP relay") + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeDhcpRelayPath) + if err != nil { + return fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Send a DELETE request to DHCP relay configuration endpoint + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodDelete, types.AnyXMLMime, + "unable to reset edge gateway DHCP relay configuration: %s", nil, &types.NSXError{}) + return err +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_firewall.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_firewall.go new file mode 100644 index 000000000..0c31d383d --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_firewall.go @@ -0,0 +1,225 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "encoding/xml" + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// requestEdgeFirewallRules nests EdgeFirewallRule as a convenience for unmarshalling POST requests +type requestEdgeFirewallRules struct { + XMLName xml.Name `xml:"firewallRules"` + EdgeFirewallRules []*types.EdgeFirewallRule `xml:"firewallRule"` +} + +// responseEdgeFirewallRules is used to unwrap response when retrieving +type responseEdgeFirewallRules struct { + XMLName xml.Name `xml:"firewall"` + Version string `xml:"version"` + EdgeFirewallRules requestEdgeFirewallRules `xml:"firewallRules"` +} + +// CreateNsxvFirewallRule creates firewall rule using proxied NSX-V API. 
It is a synchronous operation. +// It returns an object with all fields populated (including ID) +// If aboveRuleId is not empty, it will send a query parameter aboveRuleId= which instructs NSX to +// place this rule above the specified rule ID +func (egw *EdgeGateway) CreateNsxvFirewallRule(firewallRuleConfig *types.EdgeFirewallRule, aboveRuleId string) (*types.EdgeFirewallRule, error) { + if err := validateCreateNsxvFirewallRule(firewallRuleConfig, egw); err != nil { + return nil, err + } + + params := make(map[string]string) + if aboveRuleId != "" { + params["aboveRuleId"] = aboveRuleId + } + + // Wrap the provided rule for POST request + firewallRuleRequest := requestEdgeFirewallRules{ + EdgeFirewallRules: []*types.EdgeFirewallRule{firewallRuleConfig}, + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeCreateFirewallPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + // We expect to get http.StatusCreated or if not an error of type types.NSXError + // The query must be wrapped differently, depending if it mus specify the "aboveRuleId" parameter + var resp *http.Response + if aboveRuleId == "" { + resp, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPost, types.AnyXMLMime, + "error creating firewall rule: %s", firewallRuleRequest, &types.NSXError{}) + } else { + errString := fmt.Sprintf("error creating firewall rule (aboveRuleId: %s): %%s", aboveRuleId) + resp, err = egw.client.ExecuteParamRequestWithCustomError(httpPath, params, http.MethodPost, types.AnyXMLMime, + errString, firewallRuleConfig, &types.NSXError{}) + } + if err != nil { + return nil, err + } + + // Location header should look similar to: + // [/network/edges/edge-1/firewall/config/rules/197157] + firewallRuleId, err := extractNsxObjectIdFromPath(resp.Header.Get("Location")) + if err != nil { + return nil, err + } + + readFirewallRule, err := egw.GetNsxvFirewallRuleById(firewallRuleId) + if err != nil { + return nil, fmt.Errorf("unable to retrieve firewall rule with ID (%s) after creation: %s", + firewallRuleId, err) + } + return readFirewallRule, nil +} + +// UpdateNsxvFirewallRule updates types.EdgeFirewallRule with all fields using proxied NSX-V API. +// Real firewall rule ID (not the number shown in UI) is mandatory to perform the update. +func (egw *EdgeGateway) UpdateNsxvFirewallRule(firewallRuleConfig *types.EdgeFirewallRule) (*types.EdgeFirewallRule, error) { + err := validateUpdateNsxvFirewallRule(firewallRuleConfig, egw) + if err != nil { + return nil, err + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeCreateFirewallPath + "/" + firewallRuleConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Result is either 204 for success, or an error of type types.NSXError + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error while updating firewall rule : %s", firewallRuleConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + readFirewallRule, err := egw.GetNsxvFirewallRuleById(firewallRuleConfig.ID) + if err != nil { + return nil, fmt.Errorf("unable to retrieve firewall rule with ID (%s) after update: %s", + readFirewallRule.ID, err) + } + return readFirewallRule, nil +} + +// GetNsxvFirewallRuleById retrieves types.EdgeFirewallRule by real (not the number shown in UI) +// firewall rule ID as shown in the UI using proxied NSX-V API. 
+// It returns and error `ErrorEntityNotFound` if the firewall rule is not found +func (egw *EdgeGateway) GetNsxvFirewallRuleById(id string) (*types.EdgeFirewallRule, error) { + if err := validateGetNsxvFirewallRule(id, egw); err != nil { + return nil, err + } + + edgeFirewallRules, err := egw.GetAllNsxvFirewallRules() + if err != nil { + return nil, err + } + + util.Logger.Printf("[DEBUG] Searching for firewall rule with ID: %s", id) + for _, rule := range edgeFirewallRules { + util.Logger.Printf("[DEBUG] Checking rule: %#+v", rule) + if rule.ID != "" && rule.ID == id { + return rule, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetAllNsxvFirewallRules retrieves all firewall rules and returns []*types.EdgeFirewallRule or an +// error of type ErrorEntityNotFound if there are no firewall rules +func (egw *EdgeGateway) GetAllNsxvFirewallRules() ([]*types.EdgeFirewallRule, error) { + if !egw.HasAdvancedNetworking() { + return nil, fmt.Errorf("only advanced edge gateways support firewall rules") + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeFirewallPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + firewallRuleResponse := &responseEdgeFirewallRules{} + + // This query returns all application rules as the API does not have filtering options + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read firewall rules: %s", nil, firewallRuleResponse) + if err != nil { + return nil, err + } + + if len(firewallRuleResponse.EdgeFirewallRules.EdgeFirewallRules) == 0 { + return nil, ErrorEntityNotFound + } + + return firewallRuleResponse.EdgeFirewallRules.EdgeFirewallRules, nil +} + +// DeleteNsxvFirewallRuleById deletes types.EdgeFirewallRule by real (not the number shown in UI) +// firewall rule ID as shown in the UI using proxied NSX-V API. +// It returns and error `ErrorEntityNotFound` if the firewall rule is not found. 
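Since the proxied NSX-V firewall API offers no filtering, deleting a rule means working with its real ID (not the sequence number shown in the UI). A minimal sketch, assuming an already retrieved advanced NSX-V `*govcd.EdgeGateway`; the helper name is illustrative.

```go
package examples

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// deleteFirewallRuleByRealId is an illustrative helper: it assumes 'egw' is an
// already retrieved advanced NSX-V edge gateway and 'id' is the real firewall
// rule ID (not the sequence number shown in the UI).
func deleteFirewallRuleByRealId(egw *govcd.EdgeGateway, id string) error {
	// Listing all rules is the only option - the proxied NSX-V API offers no filtering
	rules, err := egw.GetAllNsxvFirewallRules()
	if err != nil {
		return fmt.Errorf("could not list firewall rules: %s", err)
	}
	fmt.Printf("edge gateway currently has %d firewall rules\n", len(rules))

	err = egw.DeleteNsxvFirewallRuleById(id)
	if govcd.ContainsNotFound(err) {
		// Nothing to delete - treat as success in this sketch
		return nil
	}
	return err
}
```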
+func (egw *EdgeGateway) DeleteNsxvFirewallRuleById(id string) error { + err := validateDeleteNsxvFirewallRule(id, egw) + if err != nil { + return err + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeCreateFirewallPath + "/" + id) + if err != nil { + return fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // check if the rule exists and pass back the error at it may be 'ErrorEntityNotFound' + _, err = egw.GetNsxvFirewallRuleById(id) + if err != nil { + return err + } + + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodDelete, types.AnyXMLMime, + "unable to delete firewall rule: %s", nil, &types.NSXError{}) + if err != nil { + return err + } + + return nil +} + +func validateCreateNsxvFirewallRule(firewallRuleConfig *types.EdgeFirewallRule, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support firewall rules") + } + + if firewallRuleConfig.Action == "" { + return fmt.Errorf("firewall rule must have action specified") + } + + return nil +} + +func validateUpdateNsxvFirewallRule(firewallRuleConfig *types.EdgeFirewallRule, egw *EdgeGateway) error { + if firewallRuleConfig.ID == "" { + return fmt.Errorf("firewall rule ID must be set for update") + } + + return validateCreateNsxvFirewallRule(firewallRuleConfig, egw) +} + +func validateGetNsxvFirewallRule(id string, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support firewall rules") + } + + if id == "" { + return fmt.Errorf("unable to retrieve firewall rule without ID") + } + + return nil +} + +func validateDeleteNsxvFirewallRule(id string, egw *EdgeGateway) error { + return validateGetNsxvFirewallRule(id, egw) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_ipset.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_ipset.go new file mode 100644 index 000000000..b846c4b64 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_ipset.go @@ -0,0 +1,256 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "encoding/xml" + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// CreateNsxvIpSet creates an IP set from *types.EdgeIpSet. IP set defines a group of IP addresses +// that you can add as the source or destination in a firewall rule or in DHCP relay configuration. +func (vdc *Vdc) CreateNsxvIpSet(ipSetConfig *types.EdgeIpSet) (*types.EdgeIpSet, error) { + if err := validateCreateNsxvIpSet(ipSetConfig); err != nil { + return nil, err + } + + vdcId, err := GetUuidFromHref(vdc.Vdc.HREF, true) + if err != nil { + return nil, fmt.Errorf("unable to get vdc ID from HREF: %s", err) + } + + // build a path for IP set creation. 
The endpoint should look like: + // https://_hostname_/network/services/ipset/f9daf2da-b4f9-4921-a2f4-d77a943a381c where the + // trailing UUID is vDC ID + httpPath, err := vdc.buildNsxvNetworkServiceEndpointURL(types.NsxvIpSetServicePath + "/" + vdcId) + if err != nil { + return nil, fmt.Errorf("could not get network services API endpoint for IP set: %s", err) + } + + // Success or an error of type types.NSXError is expected + _, err = vdc.client.ExecuteParamRequestWithCustomError(httpPath, nil, http.MethodPost, types.AnyXMLMime, + "error creating IP set: %s", ipSetConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + createdIpSet, err := vdc.GetNsxvIpSetByName(ipSetConfig.Name) + if err != nil { + return nil, fmt.Errorf("could not lookup newly created IP set with name %s: %s", ipSetConfig.Name, err) + } + + return createdIpSet, nil +} + +// UpdateNsxvIpSet sends all fields of ipSetConfig. Omiting a value may reset it. ID is mandatory to +// perform update. +// Because the API always requires a Revision to be sent - the update fetches latest revision number +// automatically and embeds into the update structure. +func (vdc *Vdc) UpdateNsxvIpSet(ipSetConfig *types.EdgeIpSet) (*types.EdgeIpSet, error) { + err := validateUpdateNsxvIpSet(ipSetConfig) + if err != nil { + return nil, err + } + + // Inject latest Revision for this IP set so that API accepts change + currentIpSet, err := vdc.GetNsxvIpSetById(ipSetConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not fetch current IP set: %s", err) + } + ipSetConfig.Revision = currentIpSet.Revision + + httpPath, err := vdc.buildNsxvNetworkServiceEndpointURL(types.NsxvIpSetServicePath + "/" + ipSetConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not get network services API endpoint for IP set: %s", err) + } + + // Result is either 204 for success, or an error of type types.NSXError + errString := fmt.Sprintf("error while updating IP set with ID %s :%%s", ipSetConfig.ID) + _, err = vdc.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + errString, ipSetConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + updatedIpSet, err := vdc.GetNsxvIpSetById(ipSetConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not lookup updated IP set with ID %s: %s", ipSetConfig.ID, err) + } + + return updatedIpSet, nil +} + +// GetNsxvIpSetByName searches for IP set by name. Names are unique therefore it can find only one. +// Returns ErrorEntityNotFound if an IP set is not found +func (vdc *Vdc) GetNsxvIpSetByName(name string) (*types.EdgeIpSet, error) { + if err := validateGetNsxvIpSet("", name); err != nil { + return nil, err + } + + allIpSets, err := vdc.GetAllNsxvIpSets() + if err != nil { + return nil, err + } + + util.Logger.Printf("[DEBUG] Searching for IP set with name: %s", name) + for _, ipSet := range allIpSets { + util.Logger.Printf("[DEBUG] Checking IP set: %#+v", ipSet) + if ipSet.Name != "" && ipSet.Name == name { + return ipSet, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetNsxvIpSetById searches for IP set by ID. 
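The IP set helpers above (`CreateNsxvIpSet`, `GetNsxvIpSetByName`) compose naturally into a get-or-create routine. A hedged sketch, assuming an already retrieved `*govcd.Vdc`; the addresses are placeholders and `IPAddresses` is assumed here to take the comma-separated form used by the NSX-V IP set service.

```go
package examples

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
	"github.com/vmware/go-vcloud-director/v2/types/v56"
)

// ensureIpSet is an illustrative sketch: 'vdc' is assumed to be an already
// retrieved org VDC, and the addresses below are placeholders.
func ensureIpSet(vdc *govcd.Vdc, name string) (*types.EdgeIpSet, error) {
	existing, err := vdc.GetNsxvIpSetByName(name)
	if err == nil {
		return existing, nil
	}
	if !govcd.ContainsNotFound(err) {
		return nil, fmt.Errorf("error checking for IP set '%s': %s", name, err)
	}

	// Not found - create it with the minimal fields the create-time validation requires
	return vdc.CreateNsxvIpSet(&types.EdgeIpSet{
		Name:        name,
		IPAddresses: "10.10.10.1,10.10.10.2",
	})
}
```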
Returns ErrorEntityNotFound if an IP set is not found +func (vdc *Vdc) GetNsxvIpSetById(id string) (*types.EdgeIpSet, error) { + if err := validateGetNsxvIpSet(id, ""); err != nil { + return nil, err + } + + allIpSets, err := vdc.GetAllNsxvIpSets() + if err != nil { + return nil, err + } + + util.Logger.Printf("[DEBUG] Searching for IP set with id: %s", id) + for _, ipSet := range allIpSets { + util.Logger.Printf("[DEBUG] Checking IP set: %#+v", ipSet) + if ipSet.ID != "" && ipSet.ID == id { + return ipSet, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetNsxvIpSetByNameOrId uses the same identifier to search by name and by ID. Priority is to try +// and find the IP set by ID. If it is not found - then a search by name is performed. +func (vdc *Vdc) GetNsxvIpSetByNameOrId(identifier string) (*types.EdgeIpSet, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return vdc.GetNsxvIpSetByName(name) } + getById := func(id string, refresh bool) (interface{}, error) { return vdc.GetNsxvIpSetById(id) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, true) + if entity == nil { + return nil, err + } + return entity.(*types.EdgeIpSet), err +} + +// GetAllNsxvIpSets retrieves all IP sets and returns []*types.EdgeIpSet or an +// error of type ErrorEntityNotFound if there are no IP sets +func (vdc *Vdc) GetAllNsxvIpSets() ([]*types.EdgeIpSet, error) { + vdcId, err := GetUuidFromHref(vdc.Vdc.HREF, true) + if err != nil { + return nil, fmt.Errorf("unable to get vdc ID from HREF: %s", err) + } + + // build a path for to read all IP sets in a scope. A scope is defined by vDC ID. The endpoint + // should look like: + // https://192.168.1.109/network/services/ipset/scope/f9daf2da-b4f9-4921-a2f4-d77a943a381c where + // the trailing UUID is vDC ID + httpPath, err := vdc.buildNsxvNetworkServiceEndpointURL(types.NsxvIpSetServicePath + "/scope/" + vdcId) + if err != nil { + return nil, fmt.Errorf("could not get network services API endpoint for IP set: %s", err) + } + + // Anonymous struct to unwrap list of IP sets + ipSetsResponse := &struct { + XMLName xml.Name `xml:"list"` + types.EdgeIpSets `xml:"ipset"` + }{} + + // This query returns all IP sets on the scope (scoped by vDC ID) + errString := fmt.Sprintf("unable to read IP sets for scope %s: %%s", vdcId) + _, err = vdc.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, errString, nil, ipSetsResponse) + if err != nil { + return nil, err + } + + if len(ipSetsResponse.EdgeIpSets) == 0 { + return nil, ErrorEntityNotFound + } + + return ipSetsResponse.EdgeIpSets, nil +} + +// DeleteNsxvIpSetById deletes IP set by its ID which is formatted as +// f9daf2da-b4f9-4921-a2f4-d77a943a381c:ipset-9 +func (vdc *Vdc) DeleteNsxvIpSetById(id string) error { + err := validateDeleteNsxvIpSet(id, "") + if err != nil { + return err + } + + // build a path for to delete exact IP set sample path is: DELETE API-URL/services/ipset/id:ipset-# + // https://192.168.1.109/network/services/ipset/f9daf2da-b4f9-4921-a2f4-d77a943a381c:ipset-9 + httpPath, err := vdc.buildNsxvNetworkServiceEndpointURL(types.NsxvIpSetServicePath + "/" + id) + if err != nil { + return fmt.Errorf("could not get network services API endpoint for IP set: %s", err) + } + + errString := fmt.Sprintf("unable to delete IP set with ID %s: %%s", id) + _, err = vdc.client.ExecuteRequestWithCustomError(httpPath, http.MethodDelete, types.AnyXMLMime, + errString, nil, &types.NSXError{}) + if err != nil { + return err + } + + return nil +} + +// 
DeleteNsxvIpSetById deletes IP set by its name +func (vdc *Vdc) DeleteNsxvIpSetByName(name string) error { + err := validateDeleteNsxvIpSet("", name) + if err != nil { + return err + } + + // Get IP set by name + ipSet, err := vdc.GetNsxvIpSetByName(name) + if err != nil { + return err + } + + return vdc.DeleteNsxvIpSetById(ipSet.ID) +} + +func validateCreateNsxvIpSet(ipSetConfig *types.EdgeIpSet) error { + + if ipSetConfig.Name == "" { + return fmt.Errorf("IP set must have name defined") + } + + if ipSetConfig.IPAddresses == "" { + return fmt.Errorf("IP set must IP addresses defined") + } + + return nil +} + +func validateUpdateNsxvIpSet(ipSetConfig *types.EdgeIpSet) error { + + if ipSetConfig.ID == "" { + return fmt.Errorf("IP set ID must be set for update") + } + + return validateCreateNsxvIpSet(ipSetConfig) +} + +func validateGetNsxvIpSet(id, name string) error { + if id == "" && name == "" { + return fmt.Errorf("at least name or ID must be provided") + } + + return nil +} + +func validateDeleteNsxvIpSet(id, name string) error { + return validateGetNsxvIpSet(id, name) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_nat.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_nat.go new file mode 100644 index 000000000..20d9701e9 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/nsxv_nat.go @@ -0,0 +1,201 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "encoding/xml" + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// requestEdgeNatRules nests EdgeNatRule as a convenience for unmarshalling POST requests +type requestEdgeNatRules struct { + XMLName xml.Name `xml:"natRules"` + EdgeNatRules []*types.EdgeNatRule `xml:"natRule"` +} + +// responseEdgeNatRules is used to unwrap response when retrieving +type responseEdgeNatRules struct { + XMLName xml.Name `xml:"nat"` + Version string `xml:"version"` + NatRules requestEdgeNatRules `xml:"natRules"` +} + +// CreateNsxvNatRule creates NAT rule using proxied NSX-V API. It is a synchronuous operation. 
+// It returns an object with all fields populated (including ID) +func (egw *EdgeGateway) CreateNsxvNatRule(natRuleConfig *types.EdgeNatRule) (*types.EdgeNatRule, error) { + if err := validateCreateNsxvNatRule(natRuleConfig, egw); err != nil { + return nil, err + } + + // Wrap the provided rule for POST request + natRuleRequest := requestEdgeNatRules{ + EdgeNatRules: []*types.EdgeNatRule{natRuleConfig}, + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeCreateNatPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + // We expect to get http.StatusCreated or if not an error of type types.NSXError + resp, err := egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPost, types.AnyXMLMime, + "error creating NAT rule: %s", natRuleRequest, &types.NSXError{}) + if err != nil { + return nil, err + } + + // Location header should look similar to: + // [/network/edges/edge-1/nat/config/rules/197157] + natRuleId, err := extractNsxObjectIdFromPath(resp.Header.Get("Location")) + if err != nil { + return nil, err + } + + readNatRule, err := egw.GetNsxvNatRuleById(natRuleId) + if err != nil { + return nil, fmt.Errorf("unable to retrieve NAT rule with ID (%s) after creation: %s", + natRuleId, err) + } + return readNatRule, nil +} + +// UpdateNsxvNatRule updates types.EdgeNatRule with all fields using proxied NSX-V API. ID is +// mandatory to perform the update. +func (egw *EdgeGateway) UpdateNsxvNatRule(natRuleConfig *types.EdgeNatRule) (*types.EdgeNatRule, error) { + err := validateUpdateNsxvNatRule(natRuleConfig, egw) + if err != nil { + return nil, err + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeCreateNatPath + "/" + natRuleConfig.ID) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // Result should be 204, if not we expect an error of type types.NSXError + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodPut, types.AnyXMLMime, + "error while updating NAT rule : %s", natRuleConfig, &types.NSXError{}) + if err != nil { + return nil, err + } + + readNatRule, err := egw.GetNsxvNatRuleById(natRuleConfig.ID) + if err != nil { + return nil, fmt.Errorf("unable to retrieve NAT rule with ID (%s) after update: %s", + readNatRule.ID, err) + } + return readNatRule, nil +} + +// GetNsxvNatRules returns a list of all NAT rules in a given edge gateway +func (egw *EdgeGateway) GetNsxvNatRules() ([]*types.EdgeNatRule, error) { + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeNatPath) + if err != nil { + return nil, fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + natRuleResponse := &responseEdgeNatRules{} + + // This query returns all application rules as the API does not have filtering options + _, err = egw.client.ExecuteRequest(httpPath, http.MethodGet, types.AnyXMLMime, + "unable to read NAT rule: %s", nil, natRuleResponse) + if err != nil { + return nil, err + } + return natRuleResponse.NatRules.EdgeNatRules, nil +} + +// GetNsxvNatRuleById retrieves types.EdgeNatRule by NAT rule ID as shown in the UI using proxied +// NSX-V API. +// It returns and error `ErrorEntityNotFound` if the NAT rule is not found. 
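+//
+// Illustrative lookup sketch (not part of the upstream sources; `egw` and
+// `ruleID` are assumed to exist in the calling code). ContainsNotFound helps
+// to tell a missing rule apart from other failures:
+//
+//   rule, err := egw.GetNsxvNatRuleById(ruleID)
+//   if ContainsNotFound(err) {
+//       // the NAT rule does not exist
+//   } else if err != nil {
+//       // some other failure occurred
+//   }
+//   // otherwise 'rule' holds the retrieved *types.EdgeNatRule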
+func (egw *EdgeGateway) GetNsxvNatRuleById(id string) (*types.EdgeNatRule, error) { + if err := validateGetNsxvNatRule(id, egw); err != nil { + return nil, err + } + + edgeNatRules, err := egw.GetNsxvNatRules() + if err != nil { + return nil, err + } + + for _, rule := range edgeNatRules { + if rule.ID != "" && rule.ID == id { + return rule, nil + } + } + + return nil, ErrorEntityNotFound +} + +// DeleteNsxvNatRuleById deletes types.EdgeNatRule by NAT rule ID as shown in the UI using proxied +// NSX-V API. +// It returns and error `ErrorEntityNotFound` if the NAT rule is now found. +func (egw *EdgeGateway) DeleteNsxvNatRuleById(id string) error { + err := validateDeleteNsxvNatRule(id, egw) + if err != nil { + return err + } + + httpPath, err := egw.buildProxiedEdgeEndpointURL(types.EdgeCreateNatPath + "/" + id) + if err != nil { + return fmt.Errorf("could not get Edge Gateway API endpoint: %s", err) + } + + // check if the rule exists and pass back the error at it may be 'ErrorEntityNotFound' + _, err = egw.GetNsxvNatRuleById(id) + if err != nil { + return err + } + + _, err = egw.client.ExecuteRequestWithCustomError(httpPath, http.MethodDelete, types.AnyXMLMime, + "unable to delete nat rule: %s", nil, &types.NSXError{}) + if err != nil { + return err + } + + return nil +} + +func validateCreateNsxvNatRule(natRuleConfig *types.EdgeNatRule, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support NAT rules") + } + + if natRuleConfig.Action == "" { + return fmt.Errorf("NAT rule must have an action") + } + + if natRuleConfig.TranslatedAddress == "" { + return fmt.Errorf("NAT rule must translated address specified") + } + + return nil +} + +func validateUpdateNsxvNatRule(natRuleConfig *types.EdgeNatRule, egw *EdgeGateway) error { + if natRuleConfig.ID == "" { + return fmt.Errorf("NAT rule must ID must be set for update") + } + + return validateCreateNsxvNatRule(natRuleConfig, egw) +} + +func validateGetNsxvNatRule(id string, egw *EdgeGateway) error { + if !egw.HasAdvancedNetworking() { + return fmt.Errorf("only advanced edge gateways support NAT rules") + } + + if id == "" { + return fmt.Errorf("unable to retrieve NAT rule without ID") + } + + return nil +} + +func validateDeleteNsxvNatRule(id string, egw *EdgeGateway) error { + return validateGetNsxvNatRule(id, egw) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi.go new file mode 100644 index 000000000..fc66a88c4 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi.go @@ -0,0 +1,813 @@ +package govcd + +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + + "github.com/peterhellberg/link" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// This file contains generalised low level methods to interact with VCD OpenAPI REST endpoints as documented in +// https://{VCD_HOST}/docs. In addition to this there are OpenAPI browser endpoints for tenant and provider +// respectively https://{VCD_HOST}/api-explorer/tenant/tenant-name and https://{VCD_HOST}/api-explorer/provider . 
+// OpenAPI has functions supporting below REST methods: +// GET /items (gets a slice of types like `[]types.OpenAPIEdgeGateway` or even `[]json.RawMessage` to process JSON as text. +// POST /items - creates an item +// PUT /items/URN - updates an item with specified URN +// GET /items/URN - retrieves an item with specified URN +// DELETE /items/URN - deletes an item with specified URN +// +// GET endpoints support FIQL for filtering in field `filter`. (FIQL IETF doc - https://tools.ietf.org/html/draft-nottingham-atompub-fiql-00) +// Not all API fields are supported for FIQL filtering and sometimes they return odd errors when filtering is +// unsupported. No exact documentation exists so far. +// +// Note. All functions accepting URL reference (*url.URL) will make a copy of URL because they may mutate URL reference. +// The parameter is kept as *url.URL for convenience because standard library provides pointer values. +// +// OpenAPI versioning. +// OpenAPI was introduced in VCD 9.5 (with API version 31.0). Endpoints are being added with each VCD iteration. +// Internally hosted documentation (https://HOSTNAME/docs/) can be used to check which endpoints where introduced in +// which VCD API version. +// Additionally each OpenAPI endpoint has a semantic version in its path (e.g. +// https://HOSTNAME/cloudapi/1.0.0/auditTrail). This versioned endpoint should ensure compatibility as VCD evolves. + +// OpenApiIsSupported allows to check whether VCD supports OpenAPI. Each OpenAPI endpoint however is introduced with +// different VCD API versions so this is just a general check if OpenAPI is supported at all. Particular endpoint +// introduction version can be checked in self hosted docs (https://HOSTNAME/docs/) +func (client *Client) OpenApiIsSupported() bool { + // OpenAPI was introduced in VCD 9.5+ (API version 31.0+) + return client.APIVCDMaxVersionIs(">= 31") +} + +// OpenApiBuildEndpoint helps to construct OpenAPI endpoint by using already configured VCD HREF while requiring only +// the last bit for endpoint. This is a variadic function and multiple pieces can be supplied for convenience. Leading +// '/' is added automatically. +// Sample URL construct: https://HOST/cloudapi/endpoint +func (client *Client) OpenApiBuildEndpoint(endpoint ...string) (*url.URL, error) { + endpointString := client.VCDHREF.Scheme + "://" + client.VCDHREF.Host + "/cloudapi/" + strings.Join(endpoint, "") + urlRef, err := url.ParseRequestURI(endpointString) + if err != nil { + return nil, fmt.Errorf("error formatting OpenAPI endpoint: %s", err) + } + return urlRef, nil +} + +// OpenApiGetAllItems retrieves and accumulates all pages then parsing them to a single 'outType' object. It works by at +// first crawling pages and accumulating all responses into []json.RawMessage (as strings). Because there is no +// intermediate unmarshalling to exact `outType` for every page it unmarshals into response struct in one go. 'outType' +// must be a slice of object (e.g. []*types.OpenAPIEdgeGateway) because this response contains slice of structs. +// +// Note. 
Query parameter 'pageSize' is defaulted to 128 (maximum supported) unless it is specified in queryParams +func (client *Client) OpenApiGetAllItems(apiVersion string, urlRef *url.URL, queryParams url.Values, outType interface{}, additionalHeader map[string]string) error { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + util.Logger.Printf("[TRACE] Getting all items from endpoint %s for parsing into %s type\n", + urlRefCopy.String(), reflect.TypeOf(outType)) + + if !client.OpenApiIsSupported() { + return fmt.Errorf("OpenAPI is not supported on this VCD version") + } + + // Page size is defaulted to 128 (maximum supported number) to reduce HTTP calls and improve performance unless caller + // provides other value + newQueryParams := defaultPageSize(queryParams, "128") + util.Logger.Printf("[TRACE] Will use 'pageSize=%s'", newQueryParams.Get("pageSize")) + + // Perform API call to initial endpoint. The function call recursively follows pages using Link headers "nextPage" + // until it crawls all results + responses, err := client.openApiGetAllPages(apiVersion, urlRefCopy, newQueryParams, outType, nil, additionalHeader) + if err != nil { + return fmt.Errorf("error getting all pages for endpoint %s: %s", urlRefCopy.String(), err) + } + + // Create a slice of raw JSON messages in text so that they can be unmarshalled to specified `outType` after multiple + // calls are executed + var rawJsonBodies []string + for _, singleObject := range responses { + rawJsonBodies = append(rawJsonBodies, string(singleObject)) + } + + // rawJsonBodies contains a slice of all response objects and they must be formatted as a JSON slice (wrapped + // into `[]`, separated with semicolons) so that unmarshalling to specified `outType` works in one go + allResponses := `[` + strings.Join(rawJsonBodies, ",") + `]` + + // Unmarshal all accumulated responses into `outType` + if err = json.Unmarshal([]byte(allResponses), &outType); err != nil { + return fmt.Errorf("error decoding values into type: %s", err) + } + + return nil +} + +// OpenApiGetItem is a low level OpenAPI client function to perform GET request for any item. +// The urlRef must point to ID of exact item (e.g. '/1.0.0/edgeGateways/{EDGE_ID}') +// It responds with HTTP 403: Forbidden - If the user is not authorized or the entity does not exist. When HTTP 403 is +// returned this function returns "ErrorEntityNotFound: API_ERROR" so that one can use ContainsNotFound(err) to +// differentiate when an objects was not found from any other error. +func (client *Client) OpenApiGetItem(apiVersion string, urlRef *url.URL, params url.Values, outType interface{}, additionalHeader map[string]string) error { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + util.Logger.Printf("[TRACE] Getting item from endpoint %s with expected response of type %s", + urlRefCopy.String(), reflect.TypeOf(outType)) + + if !client.OpenApiIsSupported() { + return fmt.Errorf("OpenAPI is not supported on this VCD version") + } + + req := client.newOpenApiRequest(apiVersion, params, http.MethodGet, urlRefCopy, nil, additionalHeader) + resp, err := client.Http.Do(req) + if err != nil { + return fmt.Errorf("error performing GET request to %s: %s", urlRefCopy.String(), err) + } + + // Bypassing the regular path using function checkRespWithErrType and returning parsed error directly + // HTTP 403: Forbidden - is returned if the user is not authorized or the entity does not exist. 
+ if resp.StatusCode == http.StatusForbidden { + err := ParseErr(types.BodyTypeJSON, resp, &types.OpenApiError{}) + closeErr := resp.Body.Close() + return fmt.Errorf("%s: %s [body close error: %s]", ErrorEntityNotFound, err, closeErr) + } + + // resp is ignored below because it is the same as above + _, err = checkRespWithErrType(types.BodyTypeJSON, resp, err, &types.OpenApiError{}) + + // Any other error occurred + if err != nil { + return fmt.Errorf("error in HTTP GET request: %s", err) + } + + if err = decodeBody(types.BodyTypeJSON, resp, outType); err != nil { + return fmt.Errorf("error decoding JSON response after GET: %s", err) + } + + err = resp.Body.Close() + if err != nil { + return fmt.Errorf("error closing response body: %s", err) + } + + return nil +} + +// OpenApiPostItemSync is a low level OpenAPI client function to perform POST request for items that support synchronous +// requests. The urlRef must point to POST endpoint (e.g. '/1.0.0/edgeGateways') that supports synchronous requests. It +// will return an error when endpoint does not support synchronous requests (HTTP response status code is not 201). +// Response will be unmarshalled into outType. +// +// Note. Even though it may return error if the item does not support synchronous request - the object may still be +// created. OpenApiPostItem would handle both cases and always return created item. +func (client *Client) OpenApiPostItemSync(apiVersion string, urlRef *url.URL, params url.Values, payload, outType interface{}) error { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + util.Logger.Printf("[TRACE] Posting %s item to endpoint %s with expected response of type %s", + reflect.TypeOf(payload), urlRefCopy.String(), reflect.TypeOf(outType)) + + if !client.OpenApiIsSupported() { + return fmt.Errorf("OpenAPI is not supported on this VCD version") + } + + resp, err := client.openApiPerformPostPut(http.MethodPost, apiVersion, urlRefCopy, params, payload, nil) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusCreated { + util.Logger.Printf("[TRACE] Synchronous task expected (HTTP status code 201). Got %d", resp.StatusCode) + + } + + if err = decodeBody(types.BodyTypeJSON, resp, outType); err != nil { + return fmt.Errorf("error decoding JSON response after POST: %s", err) + } + + err = resp.Body.Close() + if err != nil { + return fmt.Errorf("error closing response body: %s", err) + } + + return nil +} + +// OpenApiPostItemAsync is a low level OpenAPI client function to perform POST request for items that support +// asynchronous requests. The urlRef must point to POST endpoint (e.g. '/1.0.0/edgeGateways') that supports asynchronous +// requests. It will return an error if item does not support asynchronous request (does not respond with HTTP 202). +// +// Note. Even though it may return error if the item does not support asynchronous request - the object may still be +// created. OpenApiPostItem would handle both cases and always return created item. 
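+//
+// Illustrative usage sketch (not part of the upstream sources; `client`,
+// `apiVersion`, `urlRef` and `payload` are assumed to be prepared by the
+// caller), showing how the returned task can be tracked to completion:
+//
+//   task, err := client.OpenApiPostItemAsync(apiVersion, urlRef, nil, payload)
+//   if err != nil {
+//       return err // POST failed or the endpoint is not asynchronous
+//   }
+//   if err := task.WaitTaskCompletion(); err != nil {
+//       return fmt.Errorf("creation task failed: %s", err)
+//   }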
+func (client *Client) OpenApiPostItemAsync(apiVersion string, urlRef *url.URL, params url.Values, payload interface{}) (Task, error) { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + util.Logger.Printf("[TRACE] Posting async %s item to endpoint %s with expected task response", + reflect.TypeOf(payload), urlRefCopy.String()) + + if !client.OpenApiIsSupported() { + return Task{}, fmt.Errorf("OpenAPI is not supported on this VCD version") + } + + resp, err := client.openApiPerformPostPut(http.MethodPost, apiVersion, urlRefCopy, params, payload, nil) + if err != nil { + return Task{}, err + } + + if resp.StatusCode != http.StatusAccepted { + return Task{}, fmt.Errorf("POST request expected async task (HTTP response 202), got %d", resp.StatusCode) + } + + err = resp.Body.Close() + if err != nil { + return Task{}, fmt.Errorf("error closing response body: %s", err) + } + + // Asynchronous case returns "Location" header pointing to XML task + taskUrl := resp.Header.Get("Location") + if taskUrl == "" { + return Task{}, fmt.Errorf("unexpected empty task HREF") + } + task := NewTask(client) + task.Task.HREF = taskUrl + + return *task, nil +} + +// OpenApiPostItem is a low level OpenAPI client function to perform POST request for item supporting synchronous or +// asynchronous requests. The urlRef must point to POST endpoint (e.g. '/1.0.0/edgeGateways'). When a task is +// synchronous - it will track task until it is finished and pick reference to marshal outType. +func (client *Client) OpenApiPostItem(apiVersion string, urlRef *url.URL, params url.Values, payload, outType interface{}, additionalHeader map[string]string) error { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + util.Logger.Printf("[TRACE] Posting %s item to endpoint %s with expected response of type %s", + reflect.TypeOf(payload), urlRefCopy.String(), reflect.TypeOf(outType)) + + if !client.OpenApiIsSupported() { + return fmt.Errorf("OpenAPI is not supported on this VCD version") + } + + resp, err := client.openApiPerformPostPut(http.MethodPost, apiVersion, urlRefCopy, params, payload, additionalHeader) + if err != nil { + return err + } + + // Handle two cases of API behaviour - synchronous (response status code is 201) and asynchronous (response status + // code 202) + switch resp.StatusCode { + // Asynchronous case - must track task and get item HREF from there + case http.StatusAccepted: + taskUrl := resp.Header.Get("Location") + util.Logger.Printf("[TRACE] Asynchronous task detected, tracking task with HREF: %s", taskUrl) + task := NewTask(client) + task.Task.HREF = taskUrl + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error waiting completion of task (%s): %s", taskUrl, err) + } + + // Here we have to find the resource once more to return it populated. + // Task Owner ID is the ID of created object. ID must be used (although HREF exists in task) because HREF points to + // old XML API and here we need to pull data from OpenAPI. 
+ + newObjectUrl := urlParseRequestURI(urlRefCopy.String() + task.Task.Owner.ID) + err = client.OpenApiGetItem(apiVersion, newObjectUrl, nil, outType, additionalHeader) + if err != nil { + return fmt.Errorf("error retrieving item after creation: %s", err) + } + + // Synchronous task - new item body is returned in response of HTTP POST request + case http.StatusCreated: + util.Logger.Printf("[TRACE] Synchronous task detected, marshalling outType '%s'", reflect.TypeOf(outType)) + if err = decodeBody(types.BodyTypeJSON, resp, outType); err != nil { + return fmt.Errorf("error decoding JSON response after POST: %s", err) + } + } + + err = resp.Body.Close() + if err != nil { + return fmt.Errorf("error closing response body: %s", err) + } + + return nil +} + +// OpenApiPutItemSync is a low level OpenAPI client function to perform PUT request for items that support synchronous +// requests. The urlRef must point to ID of exact item (e.g. '/1.0.0/edgeGateways/{EDGE_ID}') and support synchronous +// requests. It will return an error when endpoint does not support synchronous requests (HTTP response status code is not 201). +// Response will be unmarshalled into outType. +// +// Note. Even though it may return error if the item does not support synchronous request - the object may still be +// updated. OpenApiPutItem would handle both cases and always return updated item. +func (client *Client) OpenApiPutItemSync(apiVersion string, urlRef *url.URL, params url.Values, payload, outType interface{}, additionalHeader map[string]string) error { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + util.Logger.Printf("[TRACE] Putting %s item to endpoint %s with expected response of type %s", + reflect.TypeOf(payload), urlRefCopy.String(), reflect.TypeOf(outType)) + + if !client.OpenApiIsSupported() { + return fmt.Errorf("OpenAPI is not supported on this VCD version") + } + + resp, err := client.openApiPerformPostPut(http.MethodPut, apiVersion, urlRefCopy, params, payload, additionalHeader) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusCreated { + util.Logger.Printf("[TRACE] Synchronous task expected (HTTP status code 201). Got %d", resp.StatusCode) + + } + + if err = decodeBody(types.BodyTypeJSON, resp, outType); err != nil { + return fmt.Errorf("error decoding JSON response after PUT: %s", err) + } + + err = resp.Body.Close() + if err != nil { + return fmt.Errorf("error closing response body: %s", err) + } + + return nil +} + +// OpenApiPutItemAsync is a low level OpenAPI client function to perform PUT request for items that support asynchronous +// requests. The urlRef must point to ID of exact item (e.g. '/1.0.0/edgeGateways/{EDGE_ID}') that supports asynchronous +// requests. It will return an error if item does not support asynchronous request (does not respond with HTTP 202). +// +// Note. Even though it may return error if the item does not support asynchronous request - the object may still be +// created. OpenApiPutItem would handle both cases and always return created item. 
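+//
+// Illustrative sketch (not part of the upstream sources) of building a
+// versioned endpoint URL with OpenApiBuildEndpoint and issuing an asynchronous
+// update; `client`, `apiVersion`, `edgeGatewayId` and `payload` are assumed to
+// be defined by the caller, and the endpoint constants are only an example:
+//
+//   urlRef, err := client.OpenApiBuildEndpoint(types.OpenApiPathVersion1_0_0, types.OpenApiEndpointEdgeGateways, edgeGatewayId)
+//   if err != nil {
+//       return err
+//   }
+//   task, err := client.OpenApiPutItemAsync(apiVersion, urlRef, nil, payload, nil)
+//   if err != nil {
+//       return err
+//   }
+//   return task.WaitTaskCompletion()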
+func (client *Client) OpenApiPutItemAsync(apiVersion string, urlRef *url.URL, params url.Values, payload interface{}, additionalHeader map[string]string) (Task, error) { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + util.Logger.Printf("[TRACE] Putting async %s item to endpoint %s with expected task response", + reflect.TypeOf(payload), urlRefCopy.String()) + + if !client.OpenApiIsSupported() { + return Task{}, fmt.Errorf("OpenAPI is not supported on this VCD version") + } + resp, err := client.openApiPerformPostPut(http.MethodPut, apiVersion, urlRefCopy, params, payload, additionalHeader) + if err != nil { + return Task{}, err + } + + if resp.StatusCode != http.StatusAccepted { + return Task{}, fmt.Errorf("PUT request expected async task (HTTP response 202), got %d", resp.StatusCode) + } + + err = resp.Body.Close() + if err != nil { + return Task{}, fmt.Errorf("error closing response body: %s", err) + } + + // Asynchronous case returns "Location" header pointing to XML task + taskUrl := resp.Header.Get("Location") + if taskUrl == "" { + return Task{}, fmt.Errorf("unexpected empty task HREF") + } + task := NewTask(client) + task.Task.HREF = taskUrl + + return *task, nil +} + +// OpenApiPutItem is a low level OpenAPI client function to perform PUT request for any item. +// The urlRef must point to ID of exact item (e.g. '/1.0.0/edgeGateways/{EDGE_ID}') +// It handles synchronous and asynchronous tasks. When a task is synchronous - it will block until it is finished. +func (client *Client) OpenApiPutItem(apiVersion string, urlRef *url.URL, params url.Values, payload, outType interface{}, additionalHeader map[string]string) error { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + util.Logger.Printf("[TRACE] Putting %s item to endpoint %s with expected response of type %s", + reflect.TypeOf(payload), urlRefCopy.String(), reflect.TypeOf(outType)) + + if !client.OpenApiIsSupported() { + return fmt.Errorf("OpenAPI is not supported on this VCD version") + } + resp, err := client.openApiPerformPostPut(http.MethodPut, apiVersion, urlRefCopy, params, payload, additionalHeader) + + if err != nil { + return err + } + + // Handle two cases of API behaviour - synchronous (response status code is 201) and asynchronous (response status + // code 202) + switch resp.StatusCode { + // Asynchronous case - must track task and get item HREF from there + case http.StatusAccepted: + taskUrl := resp.Header.Get("Location") + util.Logger.Printf("[TRACE] Asynchronous task detected, tracking task with HREF: %s", taskUrl) + task := NewTask(client) + task.Task.HREF = taskUrl + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error waiting completion of task (%s): %s", taskUrl, err) + } + + // Here we have to find the resource once more to return it populated. Provided params ir ignored for retrieval. 
+ err = client.OpenApiGetItem(apiVersion, urlRefCopy, nil, outType, additionalHeader) + if err != nil { + return fmt.Errorf("error retrieving item after updating: %s", err) + } + + // Synchronous task - new item body is returned in response of HTTP PUT request + case http.StatusOK: + util.Logger.Printf("[TRACE] Synchronous task detected, marshalling outType '%s'", reflect.TypeOf(outType)) + if err = decodeBody(types.BodyTypeJSON, resp, outType); err != nil { + return fmt.Errorf("error decoding JSON response after PUT: %s", err) + } + } + + err = resp.Body.Close() + if err != nil { + return fmt.Errorf("error closing HTTP PUT response body: %s", err) + } + + return nil +} + +// OpenApiDeleteItem is a low level OpenAPI client function to perform DELETE request for any item. +// The urlRef must point to ID of exact item (e.g. '/1.0.0/edgeGateways/{EDGE_ID}') +// It handles synchronous and asynchronous tasks. When a task is synchronous - it will block until it is finished. +func (client *Client) OpenApiDeleteItem(apiVersion string, urlRef *url.URL, params url.Values, additionalHeader map[string]string) error { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + util.Logger.Printf("[TRACE] Deleting item at endpoint %s", urlRefCopy.String()) + + if !client.OpenApiIsSupported() { + return fmt.Errorf("OpenAPI is not supported on this VCD version") + } + + // Perform request + req := client.newOpenApiRequest(apiVersion, params, http.MethodDelete, urlRefCopy, nil, additionalHeader) + + resp, err := client.Http.Do(req) + if err != nil { + return err + } + + // resp is ignored below because it would be the same as above + _, err = checkRespWithErrType(types.BodyTypeJSON, resp, err, &types.OpenApiError{}) + if err != nil { + return fmt.Errorf("error in HTTP DELETE request: %s", err) + } + + err = resp.Body.Close() + if err != nil { + return fmt.Errorf("error closing response body: %s", err) + } + + // OpenAPI may work synchronously or asynchronously. When working asynchronously - it will return HTTP 202 and + // `Location` header will contain reference to task so that it can be tracked. In DELETE case we do not care about any + // ID so if DELETE operation is synchronous (returns HTTP 201) - the request has already succeeded. 
+ if resp.StatusCode == http.StatusAccepted { + taskUrl := resp.Header.Get("Location") + task := NewTask(client) + task.Task.HREF = taskUrl + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error waiting completion of task (%s): %s", taskUrl, err) + } + } + + return nil +} + +// openApiPerformPostPut is a shared function for all public PUT and POST function parts - OpenApiPostItemSync, +// OpenApiPostItemAsync, OpenApiPostItem, OpenApiPutItemSync, OpenApiPutItemAsync, OpenApiPutItem +func (client *Client) openApiPerformPostPut(httpMethod string, apiVersion string, urlRef *url.URL, params url.Values, payload interface{}, additionalHeader map[string]string) (*http.Response, error) { + // Marshal payload if we have one + body := new(bytes.Buffer) + if payload != nil { + marshaledJson, err := json.MarshalIndent(payload, "", " ") + if err != nil { + return nil, fmt.Errorf("error marshalling JSON data for %s request %s", httpMethod, err) + } + body = bytes.NewBuffer(marshaledJson) + } + + req := client.newOpenApiRequest(apiVersion, params, httpMethod, urlRef, body, additionalHeader) + resp, err := client.Http.Do(req) + if err != nil { + return nil, err + } + + // resp is ignored below because it is the same the one above + _, err = checkRespWithErrType(types.BodyTypeJSON, resp, err, &types.OpenApiError{}) + if err != nil { + return nil, fmt.Errorf("error in HTTP %s request: %s", httpMethod, err) + } + return resp, nil +} + +// openApiGetAllPages is a recursive function that helps to accumulate responses from multiple pages for GET query. It +// works by at first crawling pages and accumulating all responses into []json.RawMessage (as strings). Because there is +// no intermediate unmarshalling to exact `outType` for every page it can unmarshal into direct `outType` supplied. +// outType must be a slice of object (e.g. []*types.OpenApiRole) because accumulated responses are in JSON list +// +// It follows pages in two ways: +// * Finds a 'nextPage' link and uses it to recursively crawl all pages (default for all, except for API bug) +// * Uses fields 'resultTotal', 'page', and 'pageSize' to calculate if it should crawl further on. It is only done +// because there is a BUG in API and in some endpoints it does not return 'nextPage' link as well as null 'pageCount' +// +// In general 'nextPage' header is preferred because some endpoints +// (like cloudapi/1.0.0/nsxTResources/importableTier0Routers) do not contain pagination details and nextPage header +// contains a base64 encoded data chunk via a supplied `cursor` field +// (e.g. 
...importableTier0Routers?filter=_context==urn:vcloud:nsxtmanager:85aa2514-6a6f-4a32-8904-9695dc0f0298& +// cursor=eyJORVRXT1JLSU5HX0NVUlNPUl9PRkZTRVQiOiIwIiwicGFnZVNpemUiOjEsIk5FVFdPUktJTkdfQ1VSU09SIjoiMDAwMTMifQ==) +// The 'cursor' in example contains such values {"NETWORKING_CURSOR_OFFSET":"0","pageSize":1,"NETWORKING_CURSOR":"00013"} +func (client *Client) openApiGetAllPages(apiVersion string, urlRef *url.URL, queryParams url.Values, outType interface{}, responses []json.RawMessage, additionalHeader map[string]string) ([]json.RawMessage, error) { + // copy passed in URL ref so that it is not mutated + urlRefCopy := copyUrlRef(urlRef) + + if responses == nil { + responses = []json.RawMessage{} + } + + // Perform request + req := client.newOpenApiRequest(apiVersion, queryParams, http.MethodGet, urlRefCopy, nil, additionalHeader) + + resp, err := client.Http.Do(req) + if err != nil { + return nil, err + } + + // resp is ignored below because it is the same as above + _, err = checkRespWithErrType(types.BodyTypeJSON, resp, err, &types.OpenApiError{}) + if err != nil { + return nil, fmt.Errorf("error in HTTP GET request: %s", err) + } + + // Pages will unwrap pagination and keep a slice of raw json message to marshal to specific types + pages := &types.OpenApiPages{} + + if err = decodeBody(types.BodyTypeJSON, resp, pages); err != nil { + return nil, fmt.Errorf("error decoding JSON page response: %s", err) + } + + err = resp.Body.Close() + if err != nil { + return nil, fmt.Errorf("error closing response body: %s", err) + } + + // Accumulate all responses in a single page as JSON text using json.RawMessage + // After pages are unwrapped one can marshal response into specified type + // singleQueryResponses := &json.RawMessage{} + var singleQueryResponses []json.RawMessage + if err = json.Unmarshal(pages.Values, &singleQueryResponses); err != nil { + return nil, fmt.Errorf("error decoding values into accumulation type: %s", err) + } + responses = append(responses, singleQueryResponses...) + + // Check if there is still 'nextPage' linked and continue accumulating responses if so + nextPageUrlRef, err := findRelLink("nextPage", resp.Header) + if err != nil && !IsNotFound(err) { + return nil, fmt.Errorf("error looking for 'nextPage' in 'Link' header: %s", err) + } + + if nextPageUrlRef != nil { + responses, err = client.openApiGetAllPages(apiVersion, nextPageUrlRef, url.Values{}, outType, responses, additionalHeader) + if err != nil { + return nil, fmt.Errorf("got error on page %d: %s", pages.Page, err) + } + } + + // If nextPage header was not found, but we are not at the last page - the query URL should be forged manually to + // overcome OpenAPI BUG when it does not return 'nextPage' header + // Some API calls do not return `OpenApiPages` results at all (just values) + // In some endpoints the page field is returned as `null` and this code block cannot handle it. + if nextPageUrlRef == nil && pages.PageSize != 0 && pages.Page != 0 { + // Next URL page ref was not found therefore one must double-check if it is not an API BUG. 
There are endpoints which + // return only Total results and pageSize (not 'pageCount' and not 'nextPage' header) + pageCount := pages.ResultTotal / pages.PageSize // This division returns number of "full pages" (containing 'pageSize' amount of results) + if pages.ResultTotal%pages.PageSize > 0 { // Check if is an incomplete page (containing less than 'pageSize' results) + pageCount++ // Total pageCount is "number of complete pages + 1 incomplete" if it exists) + } + if pages.Page < pageCount { + // Clone all originally supplied query parameters to avoid overwriting them + urlQueryString := queryParams.Encode() + urlQuery, err := url.ParseQuery(urlQueryString) + if err != nil { + return nil, fmt.Errorf("error cloning queryParams: %s", err) + } + + // Increase page query by one to fetch "next" page + urlQuery.Set("page", strconv.Itoa(pages.Page+1)) + + responses, err = client.openApiGetAllPages(apiVersion, urlRefCopy, urlQuery, outType, responses, additionalHeader) + if err != nil { + return nil, fmt.Errorf("got error on page %d: %s", pages.Page, err) + } + } + + } + + return responses, nil +} + +// newOpenApiRequest is a low level function used in upstream OpenAPI functions which handles logging and +// authentication for each API request +func (client *Client) newOpenApiRequest(apiVersion string, params url.Values, method string, reqUrl *url.URL, body io.Reader, additionalHeader map[string]string) *http.Request { + // copy passed in URL ref so that it is not mutated + reqUrlCopy := copyUrlRef(reqUrl) + + // Add the params to our URL + reqUrlCopy.RawQuery += params.Encode() + + // If the body contains data - try to read all contents for logging and re-create another + // io.Reader with all contents to use it down the line + var readBody []byte + var err error + if body != nil { + readBody, err = ioutil.ReadAll(body) + if err != nil { + util.Logger.Printf("[DEBUG - newOpenApiRequest] error reading body: %s", err) + } + body = bytes.NewReader(readBody) + } + + req, err := http.NewRequest(method, reqUrlCopy.String(), body) + if err != nil { + util.Logger.Printf("[DEBUG - newOpenApiRequest] error getting new request: %s", err) + } + + if client.VCDAuthHeader != "" && client.VCDToken != "" { + // Add the authorization header + req.Header.Add(client.VCDAuthHeader, client.VCDToken) + // The deprecated authorization token is 32 characters long + // The bearer token is 612 characters long + if len(client.VCDToken) > 32 { + req.Header.Add("Authorization", "bearer "+client.VCDToken) + req.Header.Add("X-Vmware-Vcloud-Token-Type", "Bearer") + } + // Add the Accept header for VCD + acceptMime := types.JSONMime + ";version=" + apiVersion + req.Header.Add("Accept", acceptMime) + } + + for k, v := range client.customHeader { + for _, v1 := range v { + req.Header.Set(k, v1) + } + } + for k, v := range additionalHeader { + req.Header.Add(k, v) + } + + // Inject JSON mime type + req.Header.Add("Content-Type", types.JSONMime) + + setHttpUserAgent(client.UserAgent, req) + + // Avoids passing data if the logging of requests is disabled + if util.LogHttpRequest { + payload := "" + if req.ContentLength > 0 { + payload = string(readBody) + } + util.ProcessRequestOutput(util.FuncNameCallStack(), method, reqUrlCopy.String(), payload, req) + debugShowRequest(req, payload) + } + + return req +} + +// findRelLink looks for link to "nextPage" in "Link" header. It will return when first occurrence is found. 
+// Sample Link header: +// Link: [;rel="lastPage"; +// type="application/json";model="AuditTrailEvents" ; +// rel="nextPage";type="application/json";model="AuditTrailEvents"] +// Returns *url.Url or ErrorEntityNotFound +func findRelLink(relFieldName string, header http.Header) (*url.URL, error) { + headerLinks := link.ParseHeader(header) + + for relKeyName, linkAddress := range headerLinks { + switch { + // When map key has more than one name (separated by space). In such cases it can have map key as + // "lastPage nextPage" when nextPage==lastPage or similar and one specific field needs to be matched. + case strings.Contains(relKeyName, " "): + relNameSlice := strings.Split(relKeyName, " ") + for _, oneRelName := range relNameSlice { + if oneRelName == relFieldName { + return url.Parse(linkAddress.String()) + } + } + case relKeyName == relFieldName: + return url.Parse(linkAddress.String()) + } + } + + return nil, ErrorEntityNotFound +} + +// jsonRawMessagesToStrings converts []*json.RawMessage to []string +func jsonRawMessagesToStrings(messages []json.RawMessage) []string { + resultString := make([]string, len(messages)) + for index, message := range messages { + resultString[index] = string(message) + } + + return resultString +} + +// copyOrNewUrlValues either creates a copy of parameters or instantiates a new url.Values if nil parameters are +// supplied. It helps to avoid mutating supplied parameter when additional values must be injected internally. +func copyOrNewUrlValues(parameters url.Values) url.Values { + parameterCopy := make(map[string][]string) + + // if supplied parameters are nil - we just return new initialized + if parameters == nil { + return parameterCopy + } + + // Copy URL values + for key, value := range parameters { + parameterCopy[key] = value + } + + return parameterCopy +} + +// queryParameterFilterAnd is a helper to append "AND" clause to FIQL filter by using ';' (semicolon) if any values are +// already set in 'filter' value of parameters. If none existed before then 'filter' value will be set. +// +// Note. It does a copy of supplied 'parameters' value and does not mutate supplied original parameters. +func queryParameterFilterAnd(filter string, parameters url.Values) url.Values { + newParameters := copyOrNewUrlValues(parameters) + + existingFilter := newParameters.Get("filter") + if existingFilter == "" { + newParameters.Set("filter", filter) + return newParameters + } + + newParameters.Set("filter", existingFilter+";"+filter) + return newParameters +} + +// defaultPageSize allows to set 'pageSize' query parameter to defaultPageSize if one is not already specified in +// url.Values while preserving all other supplied url.Values +func defaultPageSize(queryParams url.Values, defaultPageSize string) url.Values { + newQueryParams := url.Values{} + if queryParams != nil { + newQueryParams = queryParams + } + + if _, ok := newQueryParams["pageSize"]; !ok { + newQueryParams.Set("pageSize", defaultPageSize) + } + + return newQueryParams +} + +// copyUrlRef creates a copy of URL reference by re-parsing it +func copyUrlRef(in *url.URL) *url.URL { + // error is ignored because we expect to have correct URL supplied and this greatly simplifies code inside. + newUrlRef, err := url.Parse(in.String()) + if err != nil { + util.Logger.Printf("[DEBUG - copyUrlRef] error parsing URL: %s", err) + } + return newUrlRef +} + +// shouldDoSlowSearch returns true if query isn't working or added needed params if returns false. 
+// When the name contains commas, semicolons or asterisks, the encoding is rejected by the API in VCD 10.2 version. +// For this reason, when one or more commas, semicolons or asterisks are present we run the search brute force, +// by fetching all and comparing the name. Yet, this is not needed anymore in VCD 10.3 version. +// Also, url.QueryEscape as well as url.Values.Encode() both encode the space as a + character. So we use +// search brute force too. Reference to issue: +// https://github.com/golang/go/issues/4013 +// https://github.com/czos/goamz/pull/11/files +func shouldDoSlowSearch(filterKey, name string, client *Client) (bool, url.Values, error) { + var params = url.Values{} + slowSearch := false + versionWithNoBug, err := client.VersionEqualOrGreater("10.3", 2) + if err != nil { + return false, params, err + } + if (!versionWithNoBug && (strings.Contains(name, ",") || strings.Contains(name, ";"))) || + strings.Contains(name, " ") || strings.Contains(name, "+") || strings.Contains(name, "*") { + slowSearch = true + } else { + params.Set("filter", fmt.Sprintf(filterKey+"==%s", url.QueryEscape(name))) + params.Set("filterEncoded", "true") + } + return slowSearch, params, err +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi_endpoints.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi_endpoints.go new file mode 100644 index 000000000..31dfadd2b --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi_endpoints.go @@ -0,0 +1,179 @@ +package govcd + +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +import ( + "fmt" + "sort" + "strings" + + "github.com/vmware/go-vcloud-director/v2/util" + + "github.com/hashicorp/go-version" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// endpointMinApiVersions holds mapping of OpenAPI endpoints and API versions they were introduced in. +var endpointMinApiVersions = map[string]string{ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRights: "31.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles: "31.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsCategories: "31.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles: "31.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointGlobalRoles: "31.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + types.OpenApiEndpointRights: "31.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAuditTrail: "33.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointImportableTier0Routers: "32.0", + // OpenApiEndpointExternalNetworks endpoint support was introduced with version 32.0 however it was still not stable + // enough to be used. (i.e. 
it did not support update "PUT") + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointExternalNetworks: "33.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcComputePolicies: "32.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcAssignedComputePolicies: "33.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointSessionCurrent: "34.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointEdgeClusters: "34.0", // VCD 10.1+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointEdgeGateways: "34.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointFirewallGroups: "34.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtNatRules: "34.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtFirewallRules: "34.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworks: "32.0", // VCD 9.7+ for NSX-V, 10.1+ for NSX-T + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworksDhcp: "32.0", // VCD 9.7+ for NSX-V, 10.1+ for NSX-T + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcCapabilities: "32.0", + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAppPortProfiles: "34.0", // VCD 10.1+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnel: "34.0", // VCD 10.1+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnelConnectionProperties: "34.0", // VCD 10.1+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointIpSecVpnTunnelStatus: "34.0", // VCD 10.1+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroups: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroupsCandidateVdcs: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroupsDfwPolicies: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroupsDfwDefaultPolicies: "35.0", // VCD 10.2+ + + // NSX-T ALB (Advanced/AVI Load Balancer) support was introduced in 10.2 + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbController: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbImportableClouds: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbImportableServiceEngineGroups: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbCloud: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroups: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbEdgeGateway: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbServiceEngineGroupAssignments: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbPools: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbPoolSummaries: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbVirtualServices: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointAlbVirtualServiceSummaries: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointSSLCertificateLibrary: "35.0", // VCD 10.2+ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointSSLCertificateLibraryOld: "35.0", // VCD 10.2+ and deprecated from 10.3 +} + +// elevateNsxtNatRuleApiVersion helps to elevate API version to consume newer NSX-T NAT Rule features +// API V35.2+ supports new fields FirewallMatch and Priority +// API V36.0+ supports new RuleType - REFLEXIVE + +// endpointElevatedApiVersions endpoint elevated API versions +var 
endpointElevatedApiVersions = map[string][]string{ + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointNsxtNatRules: { + //"34.0", // Basic minimum required version + "35.2", // Introduces support for new fields FirewallMatch and Priority + "36.0", // Adds support for new NAT Rule Type - REFLEXIVE (field Type must be used instead of RuleType) + }, + types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointExternalNetworks: { + //"33.0", // Basic minimum required version + "35.0", // Deprecates field BackingType in favor of BackingTypeValue + "36.0", // Adds support new type of BackingTypeValue - IMPORTED_T_LOGICAL_SWITCH (backed by NSX-T segment) + }, +} + +// checkOpenApiEndpointCompatibility checks if VCD version (to which the client is connected) is sufficient to work with +// specified OpenAPI endpoint and returns either an error or the Api version to use for calling that endpoint. This Api +// version can then be supplied to low level OpenAPI client functions. +// If the system default API version is higher than endpoint introduction version - default system one is used. +func (client *Client) checkOpenApiEndpointCompatibility(endpoint string) (string, error) { + minimumApiVersion, ok := endpointMinApiVersions[endpoint] + if !ok { + return "", fmt.Errorf("minimum API version for endopoint '%s' is not defined", endpoint) + } + + if client.APIVCDMaxVersionIs("< " + minimumApiVersion) { + maxSupportedVersion, err := client.MaxSupportedVersion() + if err != nil { + return "", fmt.Errorf("error reading maximum supported API version: %s", err) + } + return "", fmt.Errorf("endpoint '%s' requires API version to support at least '%s'. Maximum supported version in this instance: '%s'", + endpoint, minimumApiVersion, maxSupportedVersion) + } + + // If default API version is higher than minimum required API version for endpoint - use the system default one. + if client.APIClientVersionIs("> " + minimumApiVersion) { + return client.APIVersion, nil + } + + return minimumApiVersion, nil +} + +// getOpenApiHighestElevatedVersion returns highest supported API version for particular endpoint +// These API versions must be defined in endpointElevatedApiVersions. If none are there - it will return minimum +// supported API versions just like client.checkOpenApiEndpointCompatibility(). +// +// The advantage of this functions is that it provides a controlled API elevation instead of just picking the highest +// which could be risky and untested (especially if new API version is released after release of package consuming this +// SDK) +func (client *Client) getOpenApiHighestElevatedVersion(endpoint string) (string, error) { + util.Logger.Printf("[DEBUG] Checking if elevated API versions are defined for endpoint '%s'", endpoint) + + // At first get minimum API version and check if it can be supported + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return "", fmt.Errorf("error getting minimum required API version: %s", err) + } + + // If no elevated versions are defined - return minimumApiVersion + elevatedVersionSlice, elevatedVersionsDefined := endpointElevatedApiVersions[endpoint] + if !elevatedVersionsDefined { + util.Logger.Printf("[DEBUG] No elevated API versions are defined for endpoint '%s'. 
Using minimum '%s'", + endpoint, minimumApiVersion) + return minimumApiVersion, nil + } + + util.Logger.Printf("[DEBUG] Found '%d' (%s) elevated API versions for endpoint '%s' ", + len(elevatedVersionSlice), strings.Join(elevatedVersionSlice, ", "), endpoint) + + // Reverse sort (highest to lowest) slice of elevated API versions so that we can start by highest supported and go down + versionsRaw := elevatedVersionSlice + versions := make([]*version.Version, len(versionsRaw)) + for i, raw := range versionsRaw { + v, err := version.NewVersion(raw) + if err != nil { + return "", fmt.Errorf("error evaluating version %s: %s", raw, err) + } + versions[i] = v + } + sort.Sort(sort.Reverse(version.Collection(versions))) + + var supportedElevatedVersion string + // Loop highest to the lowest elevated versions and try to find highest from the list of supported ones + for _, elevatedVersion := range versions { + + util.Logger.Printf("[DEBUG] Checking if elevated version '%s' is supported by VCD instance for endpoint '%s'", + elevatedVersion.Original(), endpoint) + // Check if maximum VCD API version supported is greater or equal to elevated version + if client.APIVCDMaxVersionIs(fmt.Sprintf(">= %s", elevatedVersion.Original())) { + util.Logger.Printf("[DEBUG] Elevated version '%s' is supported by VCD instance for endpoint '%s'", + elevatedVersion.Original(), endpoint) + // highest version found - store it and abort the loop + supportedElevatedVersion = elevatedVersion.Original() + break + } + util.Logger.Printf("[DEBUG] API version '%s' is not supported by VCD instance for endpoint '%s'", + elevatedVersion.Original(), endpoint) + } + + if supportedElevatedVersion == "" { + util.Logger.Printf("[DEBUG] No elevated API versions are supported for endpoint '%s'. Will use minimum "+ + "required version '%s'", endpoint, minimumApiVersion) + return minimumApiVersion, nil + } + + util.Logger.Printf("[DEBUG] Will use elevated version '%s for endpoint '%s'", + supportedElevatedVersion, endpoint) + return supportedElevatedVersion, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi_org_network.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi_org_network.go new file mode 100644 index 000000000..0425209c7 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi_org_network.go @@ -0,0 +1,270 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/url" + "strconv" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// OpenApiOrgVdcNetwork uses OpenAPI endpoint to operate both - NSX-T and NSX-V Org VDC networks +type OpenApiOrgVdcNetwork struct { + OpenApiOrgVdcNetwork *types.OpenApiOrgVdcNetwork + client *Client +} + +// GetOpenApiOrgVdcNetworkById allows to retrieve both - NSX-T and NSX-V Org VDC networks +func (org *Org) GetOpenApiOrgVdcNetworkById(id string) (*OpenApiOrgVdcNetwork, error) { + // Inject Org ID filter to perform filtering on server side + params := url.Values{} + filterParams := queryParameterFilterAnd("orgRef.id=="+org.Org.ID, params) + return getOpenApiOrgVdcNetworkById(org.client, id, filterParams) +} + +// GetOpenApiOrgVdcNetworkById allows to retrieve both - NSX-T and NSX-V Org VDC networks +func (vdc *Vdc) GetOpenApiOrgVdcNetworkById(id string) (*OpenApiOrgVdcNetwork, error) { + // Inject Vdc ID filter to perform filtering on server side + params := url.Values{} + filterParams := queryParameterFilterAnd("orgVdc.id=="+vdc.Vdc.ID, params) + egw, err := getOpenApiOrgVdcNetworkById(vdc.client, id, filterParams) + if err != nil { + return nil, err + } + + return egw, nil +} + +// GetOpenApiOrgVdcNetworkByName allows to retrieve both - NSX-T and NSX-V Org VDC networks +func (vdc *Vdc) GetOpenApiOrgVdcNetworkByName(name string) (*OpenApiOrgVdcNetwork, error) { + queryParameters := url.Values{} + queryParameters.Add("filter", "name=="+name) + + allEdges, err := vdc.GetAllOpenApiOrgVdcNetworks(queryParameters) + if err != nil { + return nil, fmt.Errorf("unable to retrieve Org VDC network by name '%s': %s", name, err) + } + + return returnSingleOpenApiOrgVdcNetwork(name, allEdges) +} + +// GetAllOpenApiOrgVdcNetworks allows to retrieve all NSX-T or NSX-V Org VDC networks +// +// Note. 
If pageSize > 32 it will be limited to maximum of 32 in this function because API validation does not allow for +// higher number +func (vdc *Vdc) GetAllOpenApiOrgVdcNetworks(queryParameters url.Values) ([]*OpenApiOrgVdcNetwork, error) { + filteredQueryParams := queryParameterFilterAnd("orgVdc.id=="+vdc.Vdc.ID, queryParameters) + return getAllOpenApiOrgVdcNetworks(vdc.client, filteredQueryParams) +} + +// CreateOpenApiOrgVdcNetwork allows to create NSX-T or NSX-V Org VDC network +func (vdc *Vdc) CreateOpenApiOrgVdcNetwork(OrgVdcNetworkConfig *types.OpenApiOrgVdcNetwork) (*OpenApiOrgVdcNetwork, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworks + minimumApiVersion, err := vdc.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := vdc.client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnEgw := &OpenApiOrgVdcNetwork{ + OpenApiOrgVdcNetwork: &types.OpenApiOrgVdcNetwork{}, + client: vdc.client, + } + + err = vdc.client.OpenApiPostItem(minimumApiVersion, urlRef, nil, OrgVdcNetworkConfig, returnEgw.OpenApiOrgVdcNetwork, nil) + if err != nil { + return nil, fmt.Errorf("error creating Org VDC network: %s", err) + } + + return returnEgw, nil +} + +// Update allows to update Org VDC network +func (orgVdcNet *OpenApiOrgVdcNetwork) Update(OrgVdcNetworkConfig *types.OpenApiOrgVdcNetwork) (*OpenApiOrgVdcNetwork, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworks + minimumApiVersion, err := orgVdcNet.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if OrgVdcNetworkConfig.ID == "" { + return nil, fmt.Errorf("cannot update Org VDC network without ID") + } + + urlRef, err := orgVdcNet.client.OpenApiBuildEndpoint(endpoint, OrgVdcNetworkConfig.ID) + if err != nil { + return nil, err + } + + returnEgw := &OpenApiOrgVdcNetwork{ + OpenApiOrgVdcNetwork: &types.OpenApiOrgVdcNetwork{}, + client: orgVdcNet.client, + } + + err = orgVdcNet.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, OrgVdcNetworkConfig, returnEgw.OpenApiOrgVdcNetwork, nil) + if err != nil { + return nil, fmt.Errorf("error updating Org VDC network: %s", err) + } + + return returnEgw, nil +} + +// Delete allows to delete Org VDC network +func (orgVdcNet *OpenApiOrgVdcNetwork) Delete() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworks + minimumApiVersion, err := orgVdcNet.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if orgVdcNet.OpenApiOrgVdcNetwork.ID == "" { + return fmt.Errorf("cannot delete Org VDC network without ID") + } + + urlRef, err := orgVdcNet.client.OpenApiBuildEndpoint(endpoint, orgVdcNet.OpenApiOrgVdcNetwork.ID) + if err != nil { + return err + } + + err = orgVdcNet.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting Org VDC network: %s", err) + } + + return nil +} + +// GetType returns type of Org VDC network +func (orgVdcNet *OpenApiOrgVdcNetwork) GetType() string { + return orgVdcNet.OpenApiOrgVdcNetwork.NetworkType +} + +// IsIsolated returns true if the network type is isolated (NSX-V and NSX-T) +func (orgVdcNet *OpenApiOrgVdcNetwork) IsIsolated() bool { + return orgVdcNet.GetType() == types.OrgVdcNetworkTypeIsolated +} + +// IsRouted returns true if the network type is isolated (NSX-V and NSX-T) +func (orgVdcNet *OpenApiOrgVdcNetwork) IsRouted() bool { + return 
orgVdcNet.GetType() == types.OrgVdcNetworkTypeRouted +} + +// IsImported returns true if the network type is imported (NSX-T only) +func (orgVdcNet *OpenApiOrgVdcNetwork) IsImported() bool { + return orgVdcNet.GetType() == types.OrgVdcNetworkTypeOpaque +} + +// IsDirect returns true if the network type is direct (NSX-V only) +func (orgVdcNet *OpenApiOrgVdcNetwork) IsDirect() bool { + return orgVdcNet.GetType() == types.OrgVdcNetworkTypeDirect +} + +// getOpenApiOrgVdcNetworkById is a private parent for wrapped functions: +// func (org *Org) GetOpenApiOrgVdcNetworkById(id string) (*OpenApiOrgVdcNetwork, error) +// func (vdc *Vdc) GetOpenApiOrgVdcNetworkById(id string) (*OpenApiOrgVdcNetwork, error) +func getOpenApiOrgVdcNetworkById(client *Client, id string, queryParameters url.Values) (*OpenApiOrgVdcNetwork, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworks + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty Org VDC network ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + egw := &OpenApiOrgVdcNetwork{ + OpenApiOrgVdcNetwork: &types.OpenApiOrgVdcNetwork{}, + client: client, + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, queryParameters, egw.OpenApiOrgVdcNetwork, nil) + if err != nil { + return nil, err + } + + return egw, nil +} + +// returnSingleOpenApiOrgVdcNetwork helps to reduce code duplication for `GetOpenApiOrgVdcNetworkByName` functions with different +// receivers +func returnSingleOpenApiOrgVdcNetwork(name string, allEdges []*OpenApiOrgVdcNetwork) (*OpenApiOrgVdcNetwork, error) { + if len(allEdges) > 1 { + return nil, fmt.Errorf("got more than one Org VDC network by name '%s' %d", name, len(allEdges)) + } + + if len(allEdges) < 1 { + return nil, fmt.Errorf("%s: got zero Org VDC networks by name '%s'", ErrorEntityNotFound, name) + } + + return allEdges[0], nil +} + +// getAllOpenApiOrgVdcNetworks is a private parent for wrapped functions: +// func (vdc *Vdc) GetAllOpenApiOrgVdcNetworks(queryParameters url.Values) ([]*OpenApiOrgVdcNetwork, error) +// +// Note. 
If pageSize > 32 it will be limited to maximum of 32 in this function because API validation does not allow +// higher number +func getAllOpenApiOrgVdcNetworks(client *Client, queryParameters url.Values) ([]*OpenApiOrgVdcNetwork, error) { + + // Enforce maximum pageSize to be 32 as API endpoint throws error if it is > 32 + pageSizeString := queryParameters.Get("pageSize") + + switch pageSizeString { + // If no pageSize is specified it must be set to 32 as by default low level API function OpenApiGetAllItems sets 128 + case "": + queryParameters.Set("pageSize", "32") + + // If pageSize is specified ensure it is not >32 + default: + pageSizeValue, err := strconv.Atoi(pageSizeString) + if err != nil { + return nil, fmt.Errorf("error parsing pageSize value: %s", err) + } + if pageSizeString != "" && pageSizeValue > 32 { + queryParameters.Set("pageSize", "32") + } + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworks + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.OpenApiOrgVdcNetwork{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, nil) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into OpenApiOrgVdcNetwork types with client + wrappedResponses := make([]*OpenApiOrgVdcNetwork, len(typeResponses)) + for sliceIndex := range typeResponses { + wrappedResponses[sliceIndex] = &OpenApiOrgVdcNetwork{ + OpenApiOrgVdcNetwork: typeResponses[sliceIndex], + client: client, + } + } + + return wrappedResponses, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi_org_network_dhcp.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi_org_network_dhcp.go new file mode 100644 index 000000000..a059d1fd1 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/openapi_org_network_dhcp.go @@ -0,0 +1,112 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// OpenApiOrgVdcNetwork uses OpenAPI endpoint to operate both - NSX-T and NSX-V Org VDC network DHCP settings +type OpenApiOrgVdcNetworkDhcp struct { + OpenApiOrgVdcNetworkDhcp *types.OpenApiOrgVdcNetworkDhcp + client *Client +} + +// GetOpenApiOrgVdcNetworkDhcp allows to retrieve DHCP configuration for specific Org VDC network +// ID specified as orgNetworkId using OpenAPI +func (vdc *Vdc) GetOpenApiOrgVdcNetworkDhcp(orgNetworkId string) (*OpenApiOrgVdcNetworkDhcp, error) { + + client := vdc.client + // Inject Vdc ID filter to perform filtering on server side + params := url.Values{} + queryParameters := queryParameterFilterAnd("orgVdc.id=="+vdc.Vdc.ID, params) + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworksDhcp + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if orgNetworkId == "" { + return nil, fmt.Errorf("empty Org VDC network ID") + } + + urlRef, err := client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, orgNetworkId)) + if err != nil { + return nil, err + } + + orgNetDhcp := &OpenApiOrgVdcNetworkDhcp{ + OpenApiOrgVdcNetworkDhcp: &types.OpenApiOrgVdcNetworkDhcp{}, + client: client, + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, queryParameters, orgNetDhcp.OpenApiOrgVdcNetworkDhcp, nil) + if err != nil { + return nil, err + } + + return orgNetDhcp, nil +} + +// UpdateOpenApiOrgVdcNetworkDhcp allows to update DHCP configuration for specific Org VDC network +// ID specified as orgNetworkId using OpenAPI +func (vdc *Vdc) UpdateOpenApiOrgVdcNetworkDhcp(orgNetworkId string, orgVdcNetworkDhcpConfig *types.OpenApiOrgVdcNetworkDhcp) (*OpenApiOrgVdcNetworkDhcp, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworksDhcp + minimumApiVersion, err := vdc.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := vdc.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, orgNetworkId)) + if err != nil { + return nil, err + } + + orgNetDhcpResponse := &OpenApiOrgVdcNetworkDhcp{ + OpenApiOrgVdcNetworkDhcp: &types.OpenApiOrgVdcNetworkDhcp{}, + client: vdc.client, + } + + err = vdc.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, orgVdcNetworkDhcpConfig, orgNetDhcpResponse.OpenApiOrgVdcNetworkDhcp, nil) + if err != nil { + return nil, fmt.Errorf("error updating Org VDC network DHCP configuration: %s", err) + } + + return orgNetDhcpResponse, nil +} + +// DeleteOpenApiOrgVdcNetworkDhcp allows to perform HTTP DELETE request on DHCP pool configuration for specified Org VDC +// Network ID +// +// Note. VCD Versions before 10.2 do not allow to perform "DELETE" on DHCP pool and will return error. The way to +// remove DHCP configuration is to recreate Org VDC network itself. 
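The Get/Update/Delete DHCP helpers in this file are normally driven from caller code that already holds an authenticated `*govcd.Vdc`. A minimal sketch of that flow (illustrative only, not part of the vendored file; the delete helper it calls is defined immediately below, and the network ID is assumed to come from one of the Org VDC network lookup functions):

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// showAndClearDhcp reads the DHCP configuration of an Org VDC network and then
// removes it. Per the note above, the DELETE call is only honoured by VCD 10.2+.
func showAndClearDhcp(vdc *govcd.Vdc, orgNetworkId string) error {
	// Fetch the current DHCP pool configuration for the given network ID.
	dhcp, err := vdc.GetOpenApiOrgVdcNetworkDhcp(orgNetworkId)
	if err != nil {
		return fmt.Errorf("error reading DHCP configuration: %s", err)
	}
	fmt.Printf("current DHCP configuration: %#v\n", dhcp.OpenApiOrgVdcNetworkDhcp)

	// Remove the DHCP pool configuration (only supported by VCD 10.2 and later).
	if err := vdc.DeleteOpenApiOrgVdcNetworkDhcp(orgNetworkId); err != nil {
		return fmt.Errorf("error removing DHCP configuration: %s", err)
	}
	return nil
}
```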
+func (vdc *Vdc) DeleteOpenApiOrgVdcNetworkDhcp(orgNetworkId string) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworksDhcp + minimumApiVersion, err := vdc.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if orgNetworkId == "" { + return fmt.Errorf("cannot delete Org VDC network DHCP configuration without ID") + } + + urlRef, err := vdc.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, orgNetworkId)) + if err != nil { + return err + } + + err = vdc.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting Org VDC network DHCP configuration: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/org.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/org.go new file mode 100644 index 000000000..9e89dd02e --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/org.go @@ -0,0 +1,583 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type Org struct { + Org *types.Org + client *Client + TenantContext *TenantContext +} + +func NewOrg(client *Client) *Org { + return &Org{ + Org: new(types.Org), + client: client, + } +} + +// Given an org with a valid HREF, the function refetches the org +// and updates the user's org data. Otherwise if the function fails, +// it returns an error. Users should use refresh whenever they have +// a stale org due to the creation/update/deletion of a resource +// within the org or the org itself. +func (org *Org) Refresh() error { + if *org == (Org{}) { + return fmt.Errorf("cannot refresh, Object is empty") + } + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. + unmarshalledOrg := &types.Org{} + + _, err := org.client.ExecuteRequest(org.Org.HREF, http.MethodGet, + "", "error refreshing organization: %s", nil, unmarshalledOrg) + if err != nil { + return err + } + org.Org = unmarshalledOrg + + // The request was successful + return nil +} + +// Given a valid catalog name, FindCatalog returns a Catalog object. +// If no catalog is found, then returns an empty catalog and no error. +// Otherwise it returns an error. +// Deprecated: use org.GetCatalogByName instead +func (org *Org) FindCatalog(catalogName string) (Catalog, error) { + + for _, link := range org.Org.Link { + if link.Rel == "down" && link.Type == "application/vnd.vmware.vcloud.catalog+xml" && link.Name == catalogName { + + cat := NewCatalog(org.client) + + _, err := org.client.ExecuteRequest(link.HREF, http.MethodGet, + "", "error retrieving catalog: %s", nil, cat.Catalog) + + return *cat, err + } + } + + return Catalog{}, nil +} + +// GetVdcByName if user specifies valid vdc name then this returns a vdc object. +// If no vdc is found, then it returns an empty vdc and no error. +// Otherwise it returns an empty vdc and an error. 
+// Deprecated: use org.GetVDCByName instead +func (org *Org) GetVdcByName(vdcname string) (Vdc, error) { + for _, link := range org.Org.Link { + if link.Name == vdcname { + vdc := NewVdc(org.client) + vdc.parent = org + + _, err := org.client.ExecuteRequest(link.HREF, http.MethodGet, + "", "error retrieving vdc: %s", nil, vdc.Vdc) + + return *vdc, err + } + } + return Vdc{}, nil +} + +// CreateCatalog creates a catalog with specified name and description +func CreateCatalog(client *Client, links types.LinkList, Name, Description string) (AdminCatalog, error) { + adminCatalog, err := CreateCatalogWithStorageProfile(client, links, Name, Description, nil) + if err != nil { + return AdminCatalog{}, nil + } + return *adminCatalog, nil +} + +// CreateCatalogWithStorageProfile is like CreateCatalog, but allows to specify storage profile +func CreateCatalogWithStorageProfile(client *Client, links types.LinkList, Name, Description string, storageProfiles *types.CatalogStorageProfiles) (*AdminCatalog, error) { + reqCatalog := &types.Catalog{ + Name: Name, + Description: Description, + } + vcomp := &types.AdminCatalog{ + Xmlns: types.XMLNamespaceVCloud, + Catalog: *reqCatalog, + CatalogStorageProfiles: storageProfiles, + } + + var createOrgLink *types.Link + for _, link := range links { + if link.Rel == "add" && link.Type == types.MimeAdminCatalog { + util.Logger.Printf("[TRACE] Create org - found the proper link for request, HREF: %s, "+ + "name: %s, type: %s, id: %s, rel: %s \n", link.HREF, link.Name, link.Type, link.ID, link.Rel) + createOrgLink = link + } + } + + if createOrgLink == nil { + return nil, fmt.Errorf("creating catalog failed to find url") + } + + catalog := NewAdminCatalog(client) + _, err := client.ExecuteRequest(createOrgLink.HREF, http.MethodPost, + "application/vnd.vmware.admin.catalog+xml", "error creating catalog: %s", vcomp, catalog.AdminCatalog) + + return catalog, err +} + +// CreateCatalog creates a catalog with given name and description under +// the given organization. Returns an Catalog that contains a creation +// task. 
+// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/POST-CreateCatalog.html +func (org *Org) CreateCatalog(name, description string) (Catalog, error) { + catalog, err := org.CreateCatalogWithStorageProfile(name, description, nil) + if err != nil { + return Catalog{}, err + } + return *catalog, nil +} + +// CreateCatalogWithStorageProfile is like CreateCatalog but additionally allows to specify storage profiles +func (org *Org) CreateCatalogWithStorageProfile(name, description string, storageProfiles *types.CatalogStorageProfiles) (*Catalog, error) { + catalog := NewCatalog(org.client) + adminCatalog, err := CreateCatalogWithStorageProfile(org.client, org.Org.Link, name, description, storageProfiles) + if err != nil { + return nil, err + } + catalog.Catalog = &adminCatalog.AdminCatalog.Catalog + return catalog, nil +} + +func validateVdcConfiguration(vdcDefinition *types.VdcConfiguration) error { + if vdcDefinition.Name == "" { + return errors.New("VdcConfiguration missing required field: Name") + } + if vdcDefinition.AllocationModel == "" { + return errors.New("VdcConfiguration missing required field: AllocationModel") + } + if vdcDefinition.ComputeCapacity == nil { + return errors.New("VdcConfiguration missing required field: ComputeCapacity") + } + if len(vdcDefinition.ComputeCapacity) != 1 { + return errors.New("VdcConfiguration invalid field: ComputeCapacity must only have one element") + } + if vdcDefinition.ComputeCapacity[0] == nil { + return errors.New("VdcConfiguration missing required field: ComputeCapacity[0]") + } + if vdcDefinition.ComputeCapacity[0].CPU == nil { + return errors.New("VdcConfiguration missing required field: ComputeCapacity[0].CPU") + } + if vdcDefinition.ComputeCapacity[0].CPU.Units == "" { + return errors.New("VdcConfiguration missing required field: ComputeCapacity[0].CPU.Units") + } + if vdcDefinition.ComputeCapacity[0].Memory == nil { + return errors.New("VdcConfiguration missing required field: ComputeCapacity[0].Memory") + } + if vdcDefinition.ComputeCapacity[0].Memory.Units == "" { + return errors.New("VdcConfiguration missing required field: ComputeCapacity[0].Memory.Units") + } + if vdcDefinition.VdcStorageProfile == nil || len(vdcDefinition.VdcStorageProfile) == 0 { + return errors.New("VdcConfiguration missing required field: VdcStorageProfile") + } + if vdcDefinition.VdcStorageProfile[0].Units == "" { + return errors.New("VdcConfiguration missing required field: VdcStorageProfile.Units") + } + if vdcDefinition.ProviderVdcReference == nil { + return errors.New("VdcConfiguration missing required field: ProviderVdcReference") + } + if vdcDefinition.ProviderVdcReference.HREF == "" { + return errors.New("VdcConfiguration missing required field: ProviderVdcReference.HREF") + } + return nil +} + +// GetCatalogByHref finds a Catalog by HREF +// On success, returns a pointer to the Catalog structure and a nil error +// On failure, returns a nil pointer and an error +func (org *Org) GetCatalogByHref(catalogHref string) (*Catalog, error) { + cat := NewCatalog(org.client) + + _, err := org.client.ExecuteRequest(catalogHref, http.MethodGet, + "", "error retrieving catalog: %s", nil, cat.Catalog) + if err != nil { + return nil, err + } + // The request was successful + cat.parent = org + return cat, nil +} + +// GetCatalogByName finds a Catalog by Name +// On success, returns a pointer to the Catalog structure and a nil error +// On failure, returns a nil pointer and an error +// +// refresh has no effect here, but is kept to 
preserve signature +func (org *Org) GetCatalogByName(catalogName string, refresh bool) (*Catalog, error) { + vdcQuery, err := org.queryCatalogByName(catalogName) + if ContainsNotFound(err) { + return nil, ErrorEntityNotFound + } + if err != nil { + return nil, fmt.Errorf("error querying Catalog: %s", err) + } + // This is not an AdminOrg and admin HREF must be removed if it exists + href := strings.Replace(vdcQuery.HREF, "/api/admin", "/api", 1) + return org.GetCatalogByHref(href) +} + +// GetCatalogById finds a Catalog by ID +// On success, returns a pointer to the Catalog structure and a nil error +// On failure, returns a nil pointer and an error +func (org *Org) GetCatalogById(catalogId string, refresh bool) (*Catalog, error) { + vdcQuery, err := org.queryCatalogById(catalogId) + if ContainsNotFound(err) { + return nil, ErrorEntityNotFound + } + if err != nil { + return nil, fmt.Errorf("error querying Catalog: %s", err) + } + + // This is not an AdminOrg and admin HREF must be removed if it exists + href := strings.Replace(vdcQuery.HREF, "/api/admin", "/api", 1) + return org.GetCatalogByHref(href) +} + +// GetCatalogByNameOrId finds a Catalog by name or ID +// On success, returns a pointer to the Catalog structure and a nil error +// On failure, returns a nil pointer and an error +func (org *Org) GetCatalogByNameOrId(identifier string, refresh bool) (*Catalog, error) { + getByName := func(name string, refresh bool) (interface{}, error) { + return org.GetCatalogByName(name, refresh) + } + getById := func(id string, refresh bool) (interface{}, error) { + return org.GetCatalogById(id, refresh) + } + entity, err := getEntityByNameOrIdSkipNonId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*Catalog), err +} + +// GetVDCByHref finds a VDC by HREF +// On success, returns a pointer to the VDC structure and a nil error +// On failure, returns a nil pointer and an error +func (org *Org) GetVDCByHref(vdcHref string) (*Vdc, error) { + vdc := NewVdc(org.client) + _, err := org.client.ExecuteRequest(vdcHref, http.MethodGet, + "", "error retrieving VDC: %s", nil, vdc.Vdc) + if err != nil { + return nil, err + } + // The request was successful + vdc.parent = org + return vdc, nil +} + +// GetVDCByName finds a VDC by Name +// On success, returns a pointer to the VDC structure and a nil error +// On failure, returns a nil pointer and an error +// +// refresh has no effect and is kept to preserve signature +func (org *Org) GetVDCByName(vdcName string, refresh bool) (*Vdc, error) { + vdcQuery, err := org.queryOrgVdcByName(vdcName) + if ContainsNotFound(err) { + return nil, ErrorEntityNotFound + } + if err != nil { + return nil, fmt.Errorf("error querying VDC: %s", err) + } + // This is not an AdminOrg and admin HREF must be removed if it exists + href := strings.Replace(vdcQuery.HREF, "/api/admin", "/api", 1) + return org.GetVDCByHref(href) +} + +// GetVDCById finds a VDC by ID +// On success, returns a pointer to the VDC structure and a nil error +// On failure, returns a nil pointer and an error +// +// refresh has no effect and is kept to preserve signature +func (org *Org) GetVDCById(vdcId string, refresh bool) (*Vdc, error) { + vdcQuery, err := org.queryOrgVdcById(vdcId) + if ContainsNotFound(err) { + return nil, ErrorEntityNotFound + } + if err != nil { + return nil, fmt.Errorf("error querying VDC: %s", err) + } + + // This is not an AdminOrg and admin HREF must be removed if it exists + href := strings.Replace(vdcQuery.HREF, "/api/admin", 
"/api", 1) + return org.GetVDCByHref(href) +} + +// GetVDCByNameOrId finds a VDC by name or ID +// On success, returns a pointer to the VDC structure and a nil error +// On failure, returns a nil pointer and an error +// +// refresh has no effect and is kept to preserve signature +func (org *Org) GetVDCByNameOrId(identifier string, refresh bool) (*Vdc, error) { + getByName := func(name string, refresh bool) (interface{}, error) { + return org.GetVDCByName(name, refresh) + } + getById := func(id string, refresh bool) (interface{}, error) { + return org.GetVDCById(id, refresh) + } + entity, err := getEntityByNameOrIdSkipNonId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*Vdc), err +} + +// QueryCatalogList returns a list of catalogs for this organization +func (org *Org) QueryCatalogList() ([]*types.CatalogRecord, error) { + util.Logger.Printf("[DEBUG] QueryCatalogList with Org HREF %s", org.Org.HREF) + filter := map[string]string{ + "org": org.Org.HREF, + } + return queryCatalogList(org.client, filter) +} + +// GetTaskList returns Tasks for Organization and error. +func (org *Org) GetTaskList() (*types.TasksList, error) { + + for _, link := range org.Org.Link { + if link.Rel == "down" && link.Type == "application/vnd.vmware.vcloud.tasksList+xml" { + + tasksList := &types.TasksList{} + + _, err := org.client.ExecuteRequest(link.HREF, http.MethodGet, "", + "error getting taskList: %s", nil, tasksList) + if err != nil { + return nil, err + } + + return tasksList, nil + } + } + + return nil, fmt.Errorf("link not found") +} + +// queryOrgVdcByName returns a single QueryResultOrgVdcRecordType +func (org *Org) queryOrgVdcByName(vdcName string) (*types.QueryResultOrgVdcRecordType, error) { + filterFields := map[string]string{ + "org": org.Org.HREF, + "orgName": org.Org.Name, + "name": vdcName, + } + allVdcs, err := queryOrgVdcList(org.client, filterFields) + if err != nil { + return nil, err + } + + if allVdcs == nil || len(allVdcs) < 1 { + return nil, ErrorEntityNotFound + } + + if len(allVdcs) > 1 { + return nil, fmt.Errorf("found more than 1 VDC with Name '%s'", vdcName) + } + + return allVdcs[0], nil +} + +// queryOrgVdcById returns a single QueryResultOrgVdcRecordType +func (org *Org) queryOrgVdcById(vdcId string) (*types.QueryResultOrgVdcRecordType, error) { + filterMap := map[string]string{ + "org": org.Org.HREF, + "orgName": org.Org.Name, + "id": vdcId, + } + allVdcs, err := queryOrgVdcList(org.client, filterMap) + + if err != nil { + return nil, err + } + + if len(allVdcs) < 1 { + return nil, ErrorEntityNotFound + } + + return allVdcs[0], nil +} + +// queryCatalogByName returns a single CatalogRecord +func (org *Org) queryCatalogByName(catalogName string) (*types.CatalogRecord, error) { + filterMap := map[string]string{ + // Not injecting `org` or `orgName` here because shared catalogs may also appear here and they would have different + // parent Org + // "org": org.Org.HREF, + // "orgName": org.Org.Name, + "name": catalogName, + } + allCatalogs, err := queryCatalogList(org.client, filterMap) + if err != nil { + return nil, err + } + + if allCatalogs == nil || len(allCatalogs) < 1 { + return nil, ErrorEntityNotFound + } + + // To conform with this API standard it would be best to return an error if more than 1 item is found, but because + // previous method of getting Catalog by Name returned the first result we are doing the same here + // if len(allCatalogs) > 1 { + // return nil, fmt.Errorf("found more than 1 Catalog with Name 
'%s'", catalogName) + // } + + var localCatalog *types.CatalogRecord + // if multiple results are found - return the one defined in `org` (local) + if len(allCatalogs) > 1 { + util.Logger.Printf("[DEBUG] org.queryCatalogByName found %d Catalogs by name '%s'", len(allCatalogs), catalogName) + for _, catalog := range allCatalogs { + util.Logger.Printf("[DEBUG] org.queryCatalogByName found a Catalog by name '%s' in Org '%s'", catalogName, catalog.OrgName) + if catalog.OrgName == org.Org.Name { + util.Logger.Printf("[DEBUG] org.queryCatalogByName Catalog '%s' is local for Org '%s'. Prioritising it", + catalogName, org.Org.Name) + // Not interrupting the loop here to still dump all results to logs + localCatalog = catalog + } + } + } + + // local catalog was found - return it + if localCatalog != nil { + return localCatalog, nil + } + + // If only one catalog is found or multiple catalogs with no local ones - return the first one + return allCatalogs[0], nil +} + +// queryCatalogById returns a single QueryResultOrgVdcRecordType +func (org *Org) queryCatalogById(catalogId string) (*types.CatalogRecord, error) { + filterMap := map[string]string{ + // Not injecting `org` or `orgName` here because shared catalogs may also appear here and they would have different + // parent Org + // "org": org.Org.HREF, + // "orgName": org.Org.Name, + "id": catalogId, + } + allCatalogs, err := queryCatalogList(org.client, filterMap) + + if err != nil { + return nil, err + } + + if len(allCatalogs) < 1 { + return nil, ErrorEntityNotFound + } + + return allCatalogs[0], nil +} + +// QueryOrgVdcList returns all Org VDCs using query endpoint +// +// Note. Being a 'System' user it will not return any VDC +func (org *Org) QueryOrgVdcList() ([]*types.QueryResultOrgVdcRecordType, error) { + filter := map[string]string{ + "org": org.Org.HREF, + } + + return queryOrgVdcList(org.client, filter) +} + +// queryOrgVdcList performs an `orgVdc` or `adminOrgVdc` (for System user) and optionally applies filterFields +func queryOrgVdcList(client *Client, filterFields map[string]string) ([]*types.QueryResultOrgVdcRecordType, error) { + util.Logger.Printf("[DEBUG] queryOrgVdcList with filter %#v", filterFields) + queryType := client.GetQueryType(types.QtOrgVdc) + + filter := map[string]string{ + "type": queryType, + } + + // When a map of filters with non empty keys and values is supplied - apply it + if filterFields != nil { + filterSlice := make([]string, 0) + + for filterFieldName, filterFieldValue := range filterFields { + // Do not inject 'org' filter for System user as API returns an error + if !client.IsSysAdmin && filterFieldName == "org" { + continue + } + + if filterFieldName != "" && filterFieldValue != "" { + filterText := fmt.Sprintf("%s==%s", filterFieldName, url.QueryEscape(filterFieldValue)) + filterSlice = append(filterSlice, filterText) + } + } + + if len(filterSlice) > 0 { + filter["filter"] = strings.Join(filterSlice, ";") + filter["filterEncoded"] = "true" + } + } + + results, err := client.cumulativeQuery(queryType, nil, filter) + if err != nil { + return nil, fmt.Errorf("error querying Org VDCs %s", err) + } + + if client.IsSysAdmin { + return results.Results.OrgVdcAdminRecord, nil + } else { + return results.Results.OrgVdcRecord, nil + } +} + +func queryCatalogList(client *Client, filterFields map[string]string) ([]*types.CatalogRecord, error) { + util.Logger.Printf("[DEBUG] queryCatalogList with filter %#v", filterFields) + queryType := client.GetQueryType(types.QtCatalog) + + filter := map[string]string{ + 
"type": queryType, + } + + // When a map of filters with non empty keys and values is supplied - apply it + if filterFields != nil { + filterSlice := make([]string, 0) + + for filterFieldName, filterFieldValue := range filterFields { + // Do not inject 'org' filter for System user as API returns an error + if !client.IsSysAdmin && filterFieldName == "org" { + continue + } + + if filterFieldName != "" && filterFieldValue != "" { + filterText := fmt.Sprintf("%s==%s", filterFieldName, url.QueryEscape(filterFieldValue)) + filterSlice = append(filterSlice, filterText) + } + } + + if len(filterSlice) > 0 { + filter["filter"] = strings.Join(filterSlice, ";") + filter["filterEncoded"] = "true" + } + } + + results, err := client.cumulativeQuery(queryType, nil, filter) + if err != nil { + return nil, err + } + + var catalogs []*types.CatalogRecord + + if client.IsSysAdmin { + catalogs = results.Results.AdminCatalogRecord + } else { + catalogs = results.Results.CatalogRecord + } + util.Logger.Printf("[DEBUG] QueryCatalogList returned with : %#v and error: %s", catalogs, err) + return catalogs, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/orgvdcnetwork.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/orgvdcnetwork.go new file mode 100644 index 000000000..040d22236 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/orgvdcnetwork.go @@ -0,0 +1,352 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "bytes" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// OrgVDCNetwork an org vdc network client +type OrgVDCNetwork struct { + OrgVDCNetwork *types.OrgVDCNetwork + client *Client +} + +var reErrorBusy2 = regexp.MustCompile("is busy, cannot proceed with the operation.$") + +// NewOrgVDCNetwork creates an org vdc network client +func NewOrgVDCNetwork(cli *Client) *OrgVDCNetwork { + return &OrgVDCNetwork{ + OrgVDCNetwork: new(types.OrgVDCNetwork), + client: cli, + } +} + +func (orgVdcNet *OrgVDCNetwork) Refresh() error { + if orgVdcNet.OrgVDCNetwork.HREF == "" { + return fmt.Errorf("cannot refresh, Object is empty") + } + + refreshUrl := orgVdcNet.OrgVDCNetwork.HREF + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. + orgVdcNet.OrgVDCNetwork = &types.OrgVDCNetwork{} + + _, err := orgVdcNet.client.ExecuteRequest(refreshUrl, http.MethodGet, + "", "error retrieving vDC network: %s", nil, orgVdcNet.OrgVDCNetwork) + + return err +} + +// Delete a network. Fails if the network is busy. +// Returns a task to monitor the deletion. 
+func (orgVdcNet *OrgVDCNetwork) Delete() (Task, error) { + err := orgVdcNet.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing network: %s", err) + } + pathArr := strings.Split(orgVdcNet.OrgVDCNetwork.HREF, "/") + apiEndpoint := urlParseRequestURI(orgVdcNet.OrgVDCNetwork.HREF) + apiEndpoint.Path = "/api/admin/network/" + pathArr[len(pathArr)-1] + + var resp *http.Response + for { + req := orgVdcNet.client.NewRequest(map[string]string{}, http.MethodDelete, *apiEndpoint, nil) + resp, err = checkResp(orgVdcNet.client.Http.Do(req)) + if err != nil { + if reErrorBusy2.MatchString(err.Error()) { + time.Sleep(3 * time.Second) + continue + } + return Task{}, fmt.Errorf("error deleting Network: %s", err) + } + break + } + + task := NewTask(orgVdcNet.client) + + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil +} + +// RemoveOrgVdcNetworkIfExists looks for an Org Vdc network and, if found, will delete it. +func RemoveOrgVdcNetworkIfExists(vdc Vdc, networkName string) error { + network, err := vdc.GetOrgVdcNetworkByName(networkName, true) + + if IsNotFound(err) { + // Network not found. No action needed + return nil + } + if err != nil { + // Some other error happened during retrieval. We pass it along + return err + } + // The network was found. We attempt deletion + task, err := network.Delete() + if err != nil { + return fmt.Errorf("error deleting network '%s' [phase 1]: %s", networkName, err) + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error deleting network '%s' [task]: %s", networkName, err) + } + return nil +} + +// A wrapper call around CreateOrgVDCNetwork. +// Creates a network and then uses the associated task to monitor its configuration +func (vdc *Vdc) CreateOrgVDCNetworkWait(networkConfig *types.OrgVDCNetwork) error { + + task, err := vdc.CreateOrgVDCNetwork(networkConfig) + if err != nil { + return fmt.Errorf("error creating the network: %s", err) + } + if task == (Task{}) { + return fmt.Errorf("NULL task retrieved after network creation") + + } + err = task.WaitTaskCompletion() + // err = task.WaitInspectTaskCompletion(InspectTask, 10) + if err != nil { + return fmt.Errorf("error performing task: %s", err) + } + return nil +} + +// Fine tuning network creation function. +// Return an error (the result of the network creation) and a task (used to monitor +// the network configuration) +// This function can create any type of Org Vdc network. The exact type is determined by +// the combination of properties given with the network configuration structure. 
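The helpers above fit together as a small lifecycle: RemoveOrgVdcNetworkIfExists clears any previous network with the same name, and CreateOrgVDCNetworkWait creates a new one and blocks on the returned task. A hedged sketch of that flow follows (illustrative only, not part of the vendored file; the `types.NetworkConfiguration` shape and the `FenceModeIsolated` constant are assumptions about the types package, and real definitions normally also carry IP scopes, gateway and DNS settings). The lower-level CreateOrgVDCNetwork documented just above is defined immediately after the sketch.

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
	"github.com/vmware/go-vcloud-director/v2/types/v56"
)

// recreateIsolatedNetwork removes an Org VDC network if it already exists, then
// creates it again and waits for the creation task to complete.
func recreateIsolatedNetwork(vdc govcd.Vdc, name string) error {
	// Delete any existing network with the same name (no-op when it is not found).
	if err := govcd.RemoveOrgVdcNetworkIfExists(vdc, name); err != nil {
		return fmt.Errorf("error removing existing network '%s': %s", name, err)
	}

	// Minimal, assumed network definition: the fence mode determines the network type.
	networkConfig := &types.OrgVDCNetwork{
		Name: name,
		Configuration: &types.NetworkConfiguration{
			FenceMode: types.FenceModeIsolated,
		},
	}

	// CreateOrgVDCNetworkWait wraps CreateOrgVDCNetwork and blocks on the returned task.
	if err := vdc.CreateOrgVDCNetworkWait(networkConfig); err != nil {
		return fmt.Errorf("error creating network '%s': %s", name, err)
	}
	return nil
}
```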
+func (vdc *Vdc) CreateOrgVDCNetwork(networkConfig *types.OrgVDCNetwork) (Task, error) { + for _, av := range vdc.Vdc.Link { + if av.Rel == "add" && av.Type == "application/vnd.vmware.vcloud.orgVdcNetwork+xml" { + createUrl, err := url.ParseRequestURI(av.HREF) + + if err != nil { + return Task{}, fmt.Errorf("error decoding vdc response: %s", err) + } + + networkConfig.Xmlns = types.XMLNamespaceVCloud + + output, err := xml.MarshalIndent(networkConfig, " ", " ") + if err != nil { + return Task{}, fmt.Errorf("error marshaling OrgVDCNetwork compose: %s", err) + } + + var resp *http.Response + for { + b := bytes.NewBufferString(xml.Header + string(output)) + util.Logger.Printf("[DEBUG] VCD Client configuration: %s", b) + req := vdc.client.NewRequest(map[string]string{}, http.MethodPost, *createUrl, b) + req.Header.Add("Content-Type", av.Type) + resp, err = checkResp(vdc.client.Http.Do(req)) + if err != nil { + if reErrorBusy2.MatchString(err.Error()) { + time.Sleep(3 * time.Second) + continue + } + return Task{}, fmt.Errorf("error instantiating a new OrgVDCNetwork: %s", err) + } + break + } + orgVDCNetwork := NewOrgVDCNetwork(vdc.client) + if err = decodeBody(types.BodyTypeXML, resp, orgVDCNetwork.OrgVDCNetwork); err != nil { + return Task{}, fmt.Errorf("error decoding orgvdcnetwork response: %s", err) + } + activeTasks := 0 + // Makes sure that there is only one active task for this network. + for _, taskItem := range orgVDCNetwork.OrgVDCNetwork.Tasks.Task { + if taskItem.HREF != "" { + activeTasks += 1 + if os.Getenv("GOVCD_DEBUG") != "" { + fmt.Printf("task %s (%s) is active\n", taskItem.HREF, taskItem.Status) + } + } + } + if activeTasks > 1 { + // By my understanding of the implementation, there should not be more than one task for this operation. + // If there is, we will need to change the logic of this function, as we can only return one task. (GM) + return Task{}, fmt.Errorf("found %d active tasks instead of one", activeTasks) + } + for _, taskItem := range orgVDCNetwork.OrgVDCNetwork.Tasks.Task { + return Task{taskItem, vdc.client}, nil + } + return Task{}, fmt.Errorf("[%s] no suitable task found", util.CurrentFuncName()) + } + } + return Task{}, fmt.Errorf("network creation failed: no operational link found") +} + +// GetNetworkList returns a list of networks for the VDC +func (vdc *Vdc) GetNetworkList() ([]*types.QueryResultOrgVdcNetworkRecordType, error) { + // Find the list of networks with the wanted name + result, err := vdc.client.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "orgVdcNetwork", + "filter": fmt.Sprintf("vdc==%s", url.QueryEscape(vdc.Vdc.ID)), + "filterEncoded": "true", + }) + if err != nil { + return nil, fmt.Errorf("[findEdgeGatewayConnection] error returning the list of networks for VDC: %s", err) + } + return result.Results.OrgVdcNetworkRecord, nil +} + +// FindEdgeGatewayNameByNetwork searches the VDC for a connection between an edge gateway and a given network. 
+// On success, returns the name of the edge gateway +func (vdc *Vdc) FindEdgeGatewayNameByNetwork(networkName string) (string, error) { + + // Find the list of networks with the wanted name + result, err := vdc.client.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "orgVdcNetwork", + "filter": fmt.Sprintf("name==%s;vdc==%s", url.QueryEscape(networkName), url.QueryEscape(vdc.Vdc.ID)), + "filterEncoded": "true", + }) + if err != nil { + return "", fmt.Errorf("[findEdgeGatewayConnection] error returning the list of networks for VDC: %s", err) + } + netList := result.Results.OrgVdcNetworkRecord + + for _, net := range netList { + if net.Name == networkName { + // linkType is not well documented, but empiric tests show that: + // 0 = direct + // 1 = routed + // 2 = isolated + if net.ConnectedTo != "" && net.LinkType == 1 { // We only want routed networks + return net.ConnectedTo, nil + } + } + } + return "", ErrorEntityNotFound +} + +// getParentVdc retrieves the VDC to which the network is attached +func (orgVdcNet *OrgVDCNetwork) getParentVdc() (*Vdc, error) { + for _, link := range orgVdcNet.OrgVDCNetwork.Link { + if link.Type == "application/vnd.vmware.vcloud.vdc+xml" { + + vdc := NewVdc(orgVdcNet.client) + + _, err := orgVdcNet.client.ExecuteRequest(link.HREF, http.MethodGet, + "", "error retrieving parent vdc: %s", nil, vdc.Vdc) + if err != nil { + return nil, err + } + + return vdc, nil + } + } + return nil, fmt.Errorf("could not find a parent Vdc for network %s", orgVdcNet.OrgVDCNetwork.Name) +} + +// getEdgeGateway retrieves the edge gateway connected to a routed network +func (orgVdcNet *OrgVDCNetwork) getEdgeGateway() (*EdgeGateway, error) { + // If it is not routed, returns an error + if orgVdcNet.OrgVDCNetwork.Configuration.FenceMode != types.FenceModeNAT { + return nil, fmt.Errorf("network %s is not routed", orgVdcNet.OrgVDCNetwork.Name) + } + vdc, err := orgVdcNet.getParentVdc() + if err != nil { + return nil, err + } + + // Since this function can be called from Update(), we must take into account the + // possibility of a name change. If this is happening, we need to retrieve the original + // name, which is still stored in the VDC. + oldNetwork, err := vdc.GetOrgVdcNetworkById(orgVdcNet.OrgVDCNetwork.ID, false) + if err != nil { + return nil, err + } + networkName := oldNetwork.OrgVDCNetwork.Name + + edgeGatewayName, err := vdc.FindEdgeGatewayNameByNetwork(networkName) + if err != nil { + return nil, err + } + + return vdc.GetEdgeGatewayByName(edgeGatewayName, false) +} + +// UpdateAsync will change the contents of a network using the information in the +// receiver data structure. +func (orgVdcNet *OrgVDCNetwork) UpdateAsync() (Task, error) { + if orgVdcNet.OrgVDCNetwork.HREF == "" { + return Task{}, fmt.Errorf("cannot update Org VDC network: HREF is empty") + } + if orgVdcNet.OrgVDCNetwork.Name == "" { + return Task{}, fmt.Errorf("cannot update Org VDC network: name is empty") + } + if orgVdcNet.OrgVDCNetwork.Configuration == nil { + return Task{}, fmt.Errorf("cannot update Org VDC network: configuration is empty") + } + + href := orgVdcNet.OrgVDCNetwork.HREF + + if !strings.Contains(href, "/api/admin/") { + href = strings.ReplaceAll(href, "/api/", "/api/admin/") + } + + // Routed networks need to have edge gateway information for both creation and update. + // Since the network data structure doesn't return edge gateway information, + // we fetch it explicitly. 
+ if orgVdcNet.OrgVDCNetwork.Configuration.FenceMode == types.FenceModeNAT { + edgeGateway, err := orgVdcNet.getEdgeGateway() + if err != nil { + return Task{}, fmt.Errorf("error retrieving edge gateway for Org VDC network %s : %s", orgVdcNet.OrgVDCNetwork.Name, err) + } + orgVdcNet.OrgVDCNetwork.EdgeGateway = &types.Reference{ + HREF: edgeGateway.EdgeGateway.HREF, + ID: edgeGateway.EdgeGateway.ID, + Type: edgeGateway.EdgeGateway.Type, + Name: edgeGateway.EdgeGateway.Name, + } + } + return orgVdcNet.client.ExecuteTaskRequest(href, http.MethodPut, + types.MimeOrgVdcNetwork, "error updating Org VDC network: %s", orgVdcNet.OrgVDCNetwork) +} + +// Update is a wrapper around UpdateAsync, where we +// explicitly wait for the task to finish. +// The pointer receiver is refreshed after update +func (orgVdcNet *OrgVDCNetwork) Update() error { + task, err := orgVdcNet.UpdateAsync() + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return err + } + + return orgVdcNet.Refresh() +} + +// Rename is a wrapper around Update(), where we only change the name of the network +// Since the purpose is explicitly changing the name, the function will fail if the new name +// is not different from the existing one +func (orgVdcNet *OrgVDCNetwork) Rename(newName string) error { + if orgVdcNet.OrgVDCNetwork.Name == newName { + return fmt.Errorf("new name is the same ase the existing name") + } + orgVdcNet.OrgVDCNetwork.Name = newName + return orgVdcNet.Update() +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/productsection.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/productsection.go new file mode 100644 index 000000000..b6233089c --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/productsection.go @@ -0,0 +1,53 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// setProductSectionList is a shared function for both vApp and VM +func setProductSectionList(client *Client, href string, productSection *types.ProductSectionList) error { + if href == "" { + return fmt.Errorf("href cannot be empty to set product section") + } + + productSection.Xmlns = types.XMLNamespaceVCloud + productSection.Ovf = types.XMLNamespaceOVF + + task, err := client.ExecuteTaskRequest(href+"/productSections", http.MethodPut, + types.MimeProductSection, "error setting product section: %s", productSection) + + if err != nil { + return fmt.Errorf("unable to set product section: %s", err) + } + + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("task for setting product section failed: %s", err) + } + + return nil +} + +// getProductSectionList is a shared function for both vApp and VM +func getProductSectionList(client *Client, href string) (*types.ProductSectionList, error) { + if href == "" { + return nil, fmt.Errorf("href cannot be empty to get product section") + } + productSection := &types.ProductSectionList{} + + _, err := client.ExecuteRequest(href+"/productSections", http.MethodGet, + types.MimeProductSection, "error retrieving product section : %s", nil, productSection) + + if err != nil { + return nil, fmt.Errorf("unable to retrieve product section: %s", err) + } + + return productSection, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/query.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/query.go new file mode 100644 index 000000000..d142e467c --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/query.go @@ -0,0 +1,88 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/http" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +type Results struct { + Results *types.QueryResultRecordsType + client *Client +} + +func NewResults(cli *Client) *Results { + return &Results{ + Results: new(types.QueryResultRecordsType), + client: cli, + } +} + +func (vcdClient *VCDClient) Query(params map[string]string) (Results, error) { + + req := vcdClient.Client.NewRequest(params, http.MethodGet, vcdClient.QueryHREF, nil) + req.Header.Add("Accept", "vnd.vmware.vcloud.org+xml;version="+vcdClient.Client.APIVersion) + + return getResult(&vcdClient.Client, req) +} + +func (vdc *Vdc) Query(params map[string]string) (Results, error) { + queryUrl := vdc.client.VCDHREF + queryUrl.Path += "/query" + req := vdc.client.NewRequest(params, http.MethodGet, queryUrl, nil) + req.Header.Add("Accept", "vnd.vmware.vcloud.org+xml;version="+vdc.client.APIVersion) + + return getResult(vdc.client, req) +} + +// QueryWithNotEncodedParams uses Query API to search for requested data +func (client *Client) QueryWithNotEncodedParams(params map[string]string, notEncodedParams map[string]string) (Results, error) { + return client.QueryWithNotEncodedParamsWithApiVersion(params, notEncodedParams, client.APIVersion) +} + +// QueryWithNotEncodedParams uses Query API to search for requested data +func (client *Client) QueryWithNotEncodedParamsWithApiVersion(params map[string]string, notEncodedParams map[string]string, apiVersion string) (Results, error) { + queryUrl := client.VCDHREF + queryUrl.Path += "/query" + + req := client.NewRequestWitNotEncodedParamsWithApiVersion(params, notEncodedParams, http.MethodGet, queryUrl, nil, apiVersion) + req.Header.Add("Accept", "vnd.vmware.vcloud.org+xml;version="+apiVersion) + + return getResult(client, req) +} + +func (vcdClient *VCDClient) QueryWithNotEncodedParams(params map[string]string, notEncodedParams map[string]string) (Results, error) { + return vcdClient.Client.QueryWithNotEncodedParams(params, notEncodedParams) +} + +func (vdc *Vdc) QueryWithNotEncodedParams(params map[string]string, notEncodedParams map[string]string) (Results, error) { + return vdc.client.QueryWithNotEncodedParams(params, notEncodedParams) +} + +func (vcdClient *VCDClient) QueryWithNotEncodedParamsWithApiVersion(params map[string]string, notEncodedParams map[string]string, apiVersion string) (Results, error) { + return vcdClient.Client.QueryWithNotEncodedParamsWithApiVersion(params, notEncodedParams, apiVersion) +} + +func (vdc *Vdc) QueryWithNotEncodedParamsWithApiVersion(params map[string]string, notEncodedParams map[string]string, apiVersion string) (Results, error) { + return vdc.client.QueryWithNotEncodedParamsWithApiVersion(params, notEncodedParams, apiVersion) +} + +func getResult(client *Client, request *http.Request) (Results, error) { + resp, err := checkResp(client.Http.Do(request)) + if err != nil { + return Results{}, fmt.Errorf("error retrieving query: %s", err) + } + + results := NewResults(client) + + if err = decodeBody(types.BodyTypeXML, resp, results.Results); err != nil { + return Results{}, fmt.Errorf("error decoding query results: %s", err) + } + + return *results, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/query_metadata.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/query_metadata.go new file mode 100644 index 000000000..e20a60df4 --- /dev/null +++ 
b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/query_metadata.go @@ -0,0 +1,323 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/url" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +/* +This file contains functions that allow an extended query including metadata fields. + +The metadata fields need to be requested explicitly (we can't just ask for generic metadata to be included +in the query result. Due to the query system implementation, when we request metadata fields, we must also +list the regular fields that we want in the result. +For this reason, we need to have the list of fields supported by the query for each query type. Not all the +fields can be used in the "fields" parameter of the query. + +The function queryFieldsOnDemand provides the fields for the supported types. + +Example: we have type "X" with fields "a", "b", "c", "d". It supports metadata. +If we want to query X without metadata, we run a simple query?type=X;[...] + +If we also want metadata, we need to know which keys we want to fetch, and run +query?type=X;fields=a,b,c,d,metadata:key1,metadata:key2 + +*/ + +// MetadataFilter is a definition of a value used to filter metadata. +// It is made of a Type (such as 'STRING', 'INT', 'BOOL") and a Value, which is the value we want to search for. +type MetadataFilter struct { + Type string + Value string +} + +// queryFieldsOnDemand returns the list of fields that can be requested in the option "fields" of a query +// Note that an alternative approach using `reflect` would require several exceptions to list all the +// fields that are not supported. +func queryFieldsOnDemand(queryType string) ([]string, error) { + // entities for which the fields on demand are supported + var ( + vappTemplatefields = []string{"ownerName", "catalogName", "isPublished", "name", "vdc", "vdcName", + "org", "creationDate", "isBusy", "isGoldMaster", "isEnabled", "status", "isDeployed", "isExpired", + "storageProfileName"} + edgeGatewayFields = []string{"name", "vdc", "orgVdcName", "numberOfExtNetworks", "numberOfOrgNetworks", "isBusy", + "gatewayStatus", "haStatus"} + orgVdcNetworkFields = []string{"name", "defaultGateway", "netmask", "dns1", "dns2", "dnsSuffix", "linkType", + "connectedTo", "vdc", "isBusy", "isShared", "vdcName", "isIpScopeInherited"} + catalogFields = []string{"name", "isPublished", "isShared", "creationDate", "orgName", "ownerName", + "numberOfMedia", "owner"} + mediaFields = []string{"ownerName", "catalogName", "isPublished", "name", "vdc", "vdcName", "org", + "creationDate", "isBusy", "storageB", "owner", "catalog", "catalogItem", "status", + "storageProfileName", "taskStatusName", "isInCatalog", "task", + "isIso", "isVdcEnabled", "taskStatus", "taskDetails"} + catalogItemFields = []string{"entity", "entityName", "entityType", "catalog", "catalogName", "ownerName", + "owner", "isPublished", "vdc", "vdcName", "isVdcEnabled", "creationDate", "isExpired", "status"} + vmFields = []string{"catalogName", "container", "containerName", "datastoreName", "description", + "gcStatus", "guestOs", "hardwareVersion", "hostName", "isAutoNature", "isDeleted", "isDeployed", "isPublished", + "isVAppTemplate", "isVdcEnabled", "memoryMB", "moref", "name", "numberOfCpus", "org", "status", + "storageProfileName", "vc", "vdc", "vmToolsVersion", "containerStatus", "pvdcHighestSupportedHardwareVersion", + "isComputePolicyCompliant", 
"vmSizingPolicyId", "vmPlacementPolicyId", "encrypted", "dateCreated", + "totalStorageAllocatedMb", "isExpired"} + vappFields = []string{"creationDate", "isBusy", "isDeployed", "isEnabled", "isExpired", "isInMaintenanceMode", "isPublic", + "ownerName", "status", "vdc", "vdcName", "numberOfVMs", "numberOfCpus", "cpuAllocationMhz", "cpuAllocationInMhz", + "storageKB", "memoryAllocationMB", "isAutoDeleteNotified", "isAutoUndeployNotified", "isVdcEnabled", "honorBookOrder", + "pvdcHighestSupportedHardwareVersion", "lowestHardwareVersionInVApp"} + orgVdcFields = []string{"name", "description", "isEnabled", "cpuAllocationMhz", "cpuLimitMhz", "cpuUsedMhz", + "memoryAllocationMB", "memoryLimitMB", "memoryUsedMB", "storageLimitMB", "storageUsedMB", "providerVdcName", + "providerVdc", "orgName", "org", "allocationModel", "numberOfVApps", "numberOfUnmanagedVApps", "numberOfMedia", + "numberOfDisks", "numberOfVAppTemplates", "vcName", "isBusy", "status", "networkPool", "numberOfResourcePools", + "numberOfStorageProfiles", "usedNetworksInVdc", "numberOfVMs", "numberOfRunningVMs", "numberOfDeployedVApps", + "numberOfDeployedUnmanagedVApps", "isThinProvisioned", "isFastProvisioned", "networkProviderType", + "cpuOverheadMhz", "isVCEnabled", "memoryReservedMB", "cpuReservedMhz", "storageOverheadMB", "memoryOverheadMB", "vc"} + fieldsOnDemand = map[string][]string{ + types.QtVappTemplate: vappTemplatefields, + types.QtAdminVappTemplate: vappTemplatefields, + types.QtEdgeGateway: edgeGatewayFields, + types.QtOrgVdcNetwork: orgVdcNetworkFields, + types.QtCatalog: catalogFields, + types.QtAdminCatalog: catalogFields, + types.QtMedia: mediaFields, + types.QtAdminMedia: mediaFields, + types.QtCatalogItem: catalogItemFields, + types.QtAdminCatalogItem: catalogItemFields, + types.QtVm: vmFields, + types.QtAdminVm: vmFields, + types.QtVapp: vappFields, + types.QtAdminVapp: vappFields, + types.QtOrgVdc: orgVdcFields, + types.QtAdminOrgVdc: orgVdcFields, + } + ) + + fields, ok := fieldsOnDemand[queryType] + if !ok { + return nil, fmt.Errorf("query type '%s' not supported", queryType) + } + return fields, nil +} + +// addResults takes records from the appropriate field in the latest results and adds them to the cumulative results +func addResults(queryType string, cumulativeResults, newResults Results) (Results, int, error) { + + var size int + switch queryType { + case types.QtAdminVappTemplate: + cumulativeResults.Results.AdminVappTemplateRecord = append(cumulativeResults.Results.AdminVappTemplateRecord, newResults.Results.AdminVappTemplateRecord...) + size = len(newResults.Results.AdminVappTemplateRecord) + case types.QtVappTemplate: + size = len(newResults.Results.VappTemplateRecord) + cumulativeResults.Results.VappTemplateRecord = append(cumulativeResults.Results.VappTemplateRecord, newResults.Results.VappTemplateRecord...) + case types.QtCatalogItem: + cumulativeResults.Results.CatalogItemRecord = append(cumulativeResults.Results.CatalogItemRecord, newResults.Results.CatalogItemRecord...) + size = len(newResults.Results.CatalogItemRecord) + case types.QtAdminCatalogItem: + cumulativeResults.Results.AdminCatalogItemRecord = append(cumulativeResults.Results.AdminCatalogItemRecord, newResults.Results.AdminCatalogItemRecord...) + size = len(newResults.Results.AdminCatalogItemRecord) + case types.QtMedia: + cumulativeResults.Results.MediaRecord = append(cumulativeResults.Results.MediaRecord, newResults.Results.MediaRecord...) 
+ size = len(newResults.Results.MediaRecord) + case types.QtAdminMedia: + cumulativeResults.Results.AdminMediaRecord = append(cumulativeResults.Results.AdminMediaRecord, newResults.Results.AdminMediaRecord...) + size = len(newResults.Results.AdminMediaRecord) + case types.QtCatalog: + cumulativeResults.Results.CatalogRecord = append(cumulativeResults.Results.CatalogRecord, newResults.Results.CatalogRecord...) + size = len(newResults.Results.CatalogRecord) + case types.QtAdminCatalog: + cumulativeResults.Results.AdminCatalogRecord = append(cumulativeResults.Results.AdminCatalogRecord, newResults.Results.AdminCatalogRecord...) + size = len(newResults.Results.AdminCatalogRecord) + case types.QtOrgVdcNetwork: + cumulativeResults.Results.OrgVdcNetworkRecord = append(cumulativeResults.Results.OrgVdcNetworkRecord, newResults.Results.OrgVdcNetworkRecord...) + size = len(newResults.Results.OrgVdcNetworkRecord) + case types.QtEdgeGateway: + cumulativeResults.Results.EdgeGatewayRecord = append(cumulativeResults.Results.EdgeGatewayRecord, newResults.Results.EdgeGatewayRecord...) + size = len(newResults.Results.EdgeGatewayRecord) + case types.QtVm: + cumulativeResults.Results.VMRecord = append(cumulativeResults.Results.VMRecord, newResults.Results.VMRecord...) + size = len(newResults.Results.VMRecord) + case types.QtAdminVm: + cumulativeResults.Results.AdminVMRecord = append(cumulativeResults.Results.AdminVMRecord, newResults.Results.AdminVMRecord...) + size = len(newResults.Results.AdminVMRecord) + case types.QtVapp: + cumulativeResults.Results.VAppRecord = append(cumulativeResults.Results.VAppRecord, newResults.Results.VAppRecord...) + size = len(newResults.Results.VAppRecord) + case types.QtAdminVapp: + cumulativeResults.Results.AdminVAppRecord = append(cumulativeResults.Results.AdminVAppRecord, newResults.Results.AdminVAppRecord...) + size = len(newResults.Results.AdminVAppRecord) + case types.QtOrgVdc: + cumulativeResults.Results.OrgVdcRecord = append(cumulativeResults.Results.OrgVdcRecord, newResults.Results.OrgVdcRecord...) + size = len(newResults.Results.OrgVdcRecord) + case types.QtAdminOrgVdc: + cumulativeResults.Results.OrgVdcAdminRecord = append(cumulativeResults.Results.OrgVdcAdminRecord, newResults.Results.OrgVdcAdminRecord...) + size = len(newResults.Results.OrgVdcAdminRecord) + + default: + return Results{}, 0, fmt.Errorf("query type %s not supported", queryType) + } + + return cumulativeResults, size, nil +} + +// cumulativeQuery runs a paginated query and collects all elements until the total number of records is retrieved +func (client *Client) cumulativeQuery(queryType string, params, notEncodedParams map[string]string) (Results, error) { + var supportedQueryTypes = []string{ + types.QtVappTemplate, + types.QtAdminVappTemplate, + types.QtEdgeGateway, + types.QtOrgVdcNetwork, + types.QtCatalog, + types.QtAdminCatalog, + types.QtMedia, + types.QtAdminMedia, + types.QtCatalogItem, + types.QtAdminCatalogItem, + types.QtVm, + types.QtAdminVm, + types.QtVapp, + types.QtAdminVapp, + types.QtOrgVdc, + types.QtAdminOrgVdc, + } + // Make sure the query type is supported + // We need to check early, as queries that would return less than 25 items (default page size) would succeed, + // but the check on query type will happen once that threshold is crossed. 
+ isSupported := false + for _, qt := range supportedQueryTypes { + if qt == queryType { + isSupported = true + break + } + } + if !isSupported { + return Results{}, fmt.Errorf("[cumulativeQuery] query type %s not supported", queryType) + } + + result, err := client.QueryWithNotEncodedParams(params, notEncodedParams) + if err != nil { + return Results{}, err + } + wanted := int(result.Results.Total) + retrieved := int(wanted) + if retrieved > result.Results.PageSize { + retrieved = result.Results.PageSize + } + if retrieved == wanted { + return result, nil + } + page := result.Results.Page + + var cumulativeResult = Results{ + Results: result.Results, + client: nil, + } + + for retrieved != wanted { + page++ + notEncodedParams["page"] = fmt.Sprintf("%d", page) + var size int + newResult, err := client.QueryWithNotEncodedParams(params, notEncodedParams) + if err != nil { + return Results{}, err + } + cumulativeResult, size, err = addResults(queryType, cumulativeResult, newResult) + if err != nil { + return Results{}, err + } + retrieved += size + } + + return result, nil +} + +// queryWithMetadataFields is a wrapper around QueryWithNotEncodedParams with additional metadata fields +// being returned. +// +// * queryType is the type of the query. Only the ones listed within queryFieldsOnDemand are supported +// * params and notEncodedParams are the same ones passed to QueryWithNotEncodedParams +// * metadataFields is the list of fields to be included in the query results +// * if isSystem is true, metadata fields are requested as 'metadata@SYSTEM:fieldName' +func (client *Client) queryWithMetadataFields(queryType string, params, notEncodedParams map[string]string, + metadataFields []string, isSystem bool) (Results, error) { + if notEncodedParams == nil { + notEncodedParams = make(map[string]string) + } + notEncodedParams["type"] = queryType + + if len(metadataFields) == 0 { + return client.cumulativeQuery(queryType, params, notEncodedParams) + } + + fields, err := queryFieldsOnDemand(queryType) + if err != nil { + return Results{}, fmt.Errorf("[queryWithMetadataFields] %s", err) + } + + if len(fields) == 0 { + return Results{}, fmt.Errorf("[queryWithMetadataFields] no fields found for type '%s'", queryType) + } + metadataFieldText := "" + prefix := "metadata" + if isSystem { + prefix = "metadata@SYSTEM" + } + for i, field := range metadataFields { + metadataFieldText += fmt.Sprintf("%s:%s", prefix, field) + if i != len(metadataFields) { + metadataFieldText += "," + } + } + + notEncodedParams["fields"] = strings.Join(fields, ",") + "," + metadataFieldText + + return client.cumulativeQuery(queryType, params, notEncodedParams) +} + +// queryByMetadataFilter is a wrapper around QueryWithNotEncodedParams with additional filtering +// on metadata fields +// Unlike queryWithMetadataFields, this function does not return the metadata fields, but only uses +// them to perform the filter. 
+// +// * params and notEncodedParams are the same ones passed to QueryWithNotEncodedParams +// * metadataFilter is is a map of conditions to use for filtering +// * if isSystem is true, metadata fields are requested as 'metadata@SYSTEM:fieldName' +func (client *Client) queryByMetadataFilter(queryType string, params, notEncodedParams map[string]string, + metadataFilters map[string]MetadataFilter, isSystem bool) (Results, error) { + + if len(metadataFilters) == 0 { + return Results{}, fmt.Errorf("[queryByMetadataFilter] no metadata fields provided") + } + if notEncodedParams == nil { + notEncodedParams = make(map[string]string) + } + notEncodedParams["type"] = queryType + + metadataFilterText := "" + prefix := "metadata" + if isSystem { + prefix = "metadata@SYSTEM" + } + count := 0 + for key, value := range metadataFilters { + metadataFilterText += fmt.Sprintf("%s:%s==%s:%s", prefix, key, value.Type, url.QueryEscape(value.Value)) + if count < len(metadataFilters)-1 { + metadataFilterText += ";" + } + count++ + } + + filter, ok := notEncodedParams["filter"] + if ok { + filter = "(" + filter + ";" + metadataFilterText + ")" + } else { + filter = metadataFilterText + } + notEncodedParams["filter"] = filter + + return client.cumulativeQuery(queryType, params, notEncodedParams) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/rights.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/rights.go new file mode 100644 index 000000000..fba155e71 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/rights.go @@ -0,0 +1,250 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ +package govcd + +import ( + "fmt" + "net/url" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// getAllRights retrieves all rights. Query parameters can be supplied to perform additional +// filtering +func getAllRights(client *Client, queryParameters url.Values, additionalHeader map[string]string) ([]*types.Right, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRights + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.Right{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, additionalHeader) + if err != nil { + return nil, err + } + + return typeResponses, nil +} + +// GetAllRights retrieves all available rights. +// Query parameters can be supplied to perform additional filtering +func (client *Client) GetAllRights(queryParameters url.Values) ([]*types.Right, error) { + return getAllRights(client, queryParameters, nil) +} + +// GetAllRights retrieves all available rights. Query parameters can be supplied to perform additional +// filtering +func (adminOrg *AdminOrg) GetAllRights(queryParameters url.Values) ([]*types.Right, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return getAllRights(adminOrg.client, queryParameters, getTenantContextHeader(tenantContext)) +} + +// getRights retrieves rights belonging to a given Role or similar container (global role, rights bundle). 
+// Query parameters can be supplied to perform additional filtering +func getRights(client *Client, roleId, endpoint string, queryParameters url.Values, additionalHeader map[string]string) ([]*types.Right, error) { + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint + roleId + "/rights") + if err != nil { + return nil, err + } + + typeResponses := []*types.Right{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, additionalHeader) + if err != nil { + return nil, err + } + + return typeResponses, nil +} + +// GetRights retrieves all rights belonging to a given Role. Query parameters can be supplied to perform additional +// filtering +func (role *Role) GetRights(queryParameters url.Values) ([]*types.Right, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + return getRights(role.client, role.Role.ID, endpoint, queryParameters, getTenantContextHeader(role.TenantContext)) +} + +// getRightByName retrieves a right by given name +func getRightByName(client *Client, name string, additionalHeader map[string]string) (*types.Right, error) { + var params = url.Values{} + + slowSearch := false + + // When the right name contains commas or semicolons, the encoding is rejected by the API. + // For this reason, when one or more commas or semicolons are present (6 occurrences in more than 300 right names) + // we run the search brute force, by fetching all the rights, and comparing the names. + // This problem should be fixed in 10.3 + // TODO: revisit this function after 10.3 is released + if strings.Contains(name, ",") || strings.Contains(name, ";") { + slowSearch = true + } else { + params.Set("filter", "name=="+name) + } + rights, err := getAllRights(client, params, additionalHeader) + if err != nil { + return nil, err + } + if len(rights) == 0 { + return nil, ErrorEntityNotFound + } + + if slowSearch { + for _, right := range rights { + if right.Name == name { + return right, nil + } + } + return nil, ErrorEntityNotFound + } + + if len(rights) > 1 { + return nil, fmt.Errorf("more than one right found with name '%s'", name) + } + return rights[0], nil +} + +// GetRightByName retrieves right by given name +func (client *Client) GetRightByName(name string) (*types.Right, error) { + return getRightByName(client, name, nil) +} + +// GetRightByName retrieves right by given name +func (adminOrg *AdminOrg) GetRightByName(name string) (*types.Right, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return getRightByName(adminOrg.client, name, getTenantContextHeader(tenantContext)) +} + +func getRightById(client *Client, id string, additionalHeader map[string]string) (*types.Right, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRights + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty role id") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + right := &types.Right{} + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, nil, right, additionalHeader) + if err != nil { + return nil, err + } + + return right, nil +} + +func (client *Client) GetRightById(id string) (*types.Right, error) { + return getRightById(client, id, nil) +} + +func (adminOrg 
*AdminOrg) GetRightById(id string) (*types.Right, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return getRightById(adminOrg.client, id, getTenantContextHeader(tenantContext)) +} + +// getAllRightsCategories retrieves all rights categories. Query parameters can be supplied to perform additional +// filtering +func getAllRightsCategories(client *Client, queryParameters url.Values, additionalHeader map[string]string) ([]*types.RightsCategory, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsCategories + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.RightsCategory{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, additionalHeader) + if err != nil { + return nil, err + } + + return typeResponses, nil +} + +// GetAllRightsCategories retrieves all rights categories. Query parameters can be supplied to perform additional +// filtering +func (client *Client) GetAllRightsCategories(queryParameters url.Values) ([]*types.RightsCategory, error) { + return getAllRightsCategories(client, queryParameters, nil) +} + +// GetAllRightsCategories retrieves all rights categories. Query parameters can be supplied to perform additional +// filtering +func (adminOrg *AdminOrg) GetAllRightsCategories(queryParameters url.Values) ([]*types.RightsCategory, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return getAllRightsCategories(adminOrg.client, queryParameters, getTenantContextHeader(tenantContext)) +} + +func getRightCategoryById(client *Client, id string, additionalHeader map[string]string) (*types.RightsCategory, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsCategories + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty category id") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + rightsCategory := &types.RightsCategory{} + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, nil, rightsCategory, additionalHeader) + if err != nil { + return nil, err + } + + return rightsCategory, nil +} + +// GetRightsCategoryById retrieves a rights category from its ID +func (adminOrg *AdminOrg) GetRightsCategoryById(id string) (*types.RightsCategory, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return getRightCategoryById(adminOrg.client, id, getTenantContextHeader(tenantContext)) +} + +// GetRightsCategoryById retrieves a rights category from its ID +func (client *Client) GetRightsCategoryById(id string) (*types.RightsCategory, error) { + return getRightCategoryById(client, id, nil) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/rights_bundle.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/rights_bundle.go new file mode 100644 index 000000000..0eb07b2c8 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/rights_bundle.go @@ -0,0 +1,262 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +type RightsBundle struct { + RightsBundle *types.RightsBundle + client *Client +} + +// CreateRightsBundle creates a new rights bundle as a system administrator +func (client *Client) CreateRightsBundle(newRightsBundle *types.RightsBundle) (*RightsBundle, error) { + if !client.IsSysAdmin { + return nil, fmt.Errorf("only system administrator can handle rights bundles") + } + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + if newRightsBundle.BundleKey == "" { + newRightsBundle.BundleKey = types.VcloudUndefinedKey + } + if newRightsBundle.PublishAll == nil { + newRightsBundle.PublishAll = takeBoolPointer(false) + } + returnBundle := &RightsBundle{ + RightsBundle: &types.RightsBundle{}, + client: client, + } + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, newRightsBundle, returnBundle.RightsBundle, nil) + if err != nil { + return nil, fmt.Errorf("error creating rights bundle: %s", err) + } + + return returnBundle, nil +} + +// Update updates existing rights bundle +func (rb *RightsBundle) Update() (*RightsBundle, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + minimumApiVersion, err := rb.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if rb.RightsBundle.Id == "" { + return nil, fmt.Errorf("cannot update role without id") + } + + urlRef, err := rb.client.OpenApiBuildEndpoint(endpoint, rb.RightsBundle.Id) + if err != nil { + return nil, err + } + + returnRightsBundle := &RightsBundle{ + RightsBundle: &types.RightsBundle{}, + client: rb.client, + } + + err = rb.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, rb.RightsBundle, returnRightsBundle.RightsBundle, nil) + if err != nil { + return nil, fmt.Errorf("error updating rights bundle: %s", err) + } + + return returnRightsBundle, nil +} + +// getAllRightsBundles retrieves all rights bundles. Query parameters can be supplied to perform additional +// filtering +func getAllRightsBundles(client *Client, queryParameters url.Values, additionalHeader map[string]string) ([]*RightsBundle, error) { + if !client.IsSysAdmin { + return nil, fmt.Errorf("only system administrator can handle rights bundles") + } + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.RightsBundle{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, additionalHeader) + if err != nil { + return nil, err + } + if len(typeResponses) == 0 { + return []*RightsBundle{}, nil + } + var results = make([]*RightsBundle, len(typeResponses)) + for i, r := range typeResponses { + results[i] = &RightsBundle{ + RightsBundle: r, + client: client, + } + } + + return results, nil +} + +// GetAllRightsBundles retrieves all rights bundles. 
Query parameters can be supplied to perform additional +// filtering +func (client *Client) GetAllRightsBundles(queryParameters url.Values) ([]*RightsBundle, error) { + return getAllRightsBundles(client, queryParameters, nil) +} + +// GetTenants retrieves all tenants associated to a given Rights Bundle. +// Query parameters can be supplied to perform additional filtering +func (rb *RightsBundle) GetTenants(queryParameters url.Values) ([]types.OpenApiReference, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return getContainerTenants(rb.client, rb.RightsBundle.Id, endpoint, queryParameters) +} + +func (rb *RightsBundle) GetRights(queryParameters url.Values) ([]*types.Right, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return getRights(rb.client, rb.RightsBundle.Id, endpoint, queryParameters, nil) +} + +// AddRights adds a collection of rights to a rights bundle +func (rb *RightsBundle) AddRights(newRights []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return addRightsToRole(rb.client, "RightsBundle", rb.RightsBundle.Name, rb.RightsBundle.Id, endpoint, newRights, nil) +} + +// UpdateRights replaces existing rights with the given collection of rights +func (rb *RightsBundle) UpdateRights(newRights []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return updateRightsInRole(rb.client, "RightsBundle", rb.RightsBundle.Name, rb.RightsBundle.Id, endpoint, newRights, nil) +} + +// RemoveRights removes specific rights from a rights bundle +func (rb *RightsBundle) RemoveRights(removeRights []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return removeRightsFromRole(rb.client, "RightsBundle", rb.RightsBundle.Name, rb.RightsBundle.Id, endpoint, removeRights, nil) +} + +// RemoveAllRights removes all rights from a rights bundle +func (rb *RightsBundle) RemoveAllRights() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return removeAllRightsFromRole(rb.client, "RightsBundle", rb.RightsBundle.Name, rb.RightsBundle.Id, endpoint, nil) +} + +// PublishTenants publishes a rights bundle to one or more tenants +func (rb *RightsBundle) PublishTenants(tenants []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return publishContainerToTenants(rb.client, "RightsBundle", rb.RightsBundle.Name, rb.RightsBundle.Id, endpoint, tenants, "add") +} + +// UnpublishTenants removes publication status in rights bundle from one or more tenants +func (rb *RightsBundle) UnpublishTenants(tenants []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return publishContainerToTenants(rb.client, "RightsBundle", rb.RightsBundle.Name, rb.RightsBundle.Id, endpoint, tenants, "remove") +} + +// ReplacePublishedTenants publishes a rights bundle to one or more tenants, removing the tenants already present +func (rb *RightsBundle) ReplacePublishedTenants(tenants []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return publishContainerToTenants(rb.client, "RightsBundle", rb.RightsBundle.Name, rb.RightsBundle.Id, endpoint, tenants, "replace") +} + +// PublishAllTenants removes publication status in rights bundle from one 
or more tenants +func (rb *RightsBundle) PublishAllTenants() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return publishContainerToAllTenants(rb.client, "RightsBundle", rb.RightsBundle.Name, rb.RightsBundle.Id, endpoint, true) +} + +// UnpublishAllTenants removes publication status in rights bundle from one or more tenants +func (rb *RightsBundle) UnpublishAllTenants() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + return publishContainerToAllTenants(rb.client, "RightsBundle", rb.RightsBundle.Name, rb.RightsBundle.Id, endpoint, false) +} + +// GetRightsBundleByName retrieves rights bundle by given name +func (client *Client) GetRightsBundleByName(name string) (*RightsBundle, error) { + queryParams := url.Values{} + queryParams.Add("filter", "name=="+name) + rightsBundles, err := client.GetAllRightsBundles(queryParams) + if err != nil { + return nil, err + } + if len(rightsBundles) == 0 { + return nil, ErrorEntityNotFound + } + if len(rightsBundles) > 1 { + return nil, fmt.Errorf("more than one rights bundle found with name '%s'", name) + } + return rightsBundles[0], nil +} + +// GetRightsBundleById retrieves rights bundle by given ID +func (client *Client) GetRightsBundleById(id string) (*RightsBundle, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty rights bundle id") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + rightsBundle := &RightsBundle{ + RightsBundle: &types.RightsBundle{}, + client: client, + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, nil, rightsBundle.RightsBundle, nil) + if err != nil { + return nil, err + } + + return rightsBundle, nil +} + +// Delete deletes rights bundle +func (rb *RightsBundle) Delete() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRightsBundles + minimumApiVersion, err := rb.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if rb.RightsBundle.Id == "" { + return fmt.Errorf("cannot delete rights bundle without id") + } + + urlRef, err := rb.client.OpenApiBuildEndpoint(endpoint, rb.RightsBundle.Id) + if err != nil { + return err + } + + err = rb.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting rights bundle: %s", err) + } + + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/roles.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/roles.go new file mode 100644 index 000000000..83eaf90ba --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/roles.go @@ -0,0 +1,454 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// Role uses OpenAPI endpoint to operate user roles +type Role struct { + Role *types.Role + client *Client + TenantContext *TenantContext +} + +// GetRoleById retrieves role by given ID +func (adminOrg *AdminOrg) GetRoleById(id string) (*Role, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + minimumApiVersion, err := adminOrg.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty role id") + } + + urlRef, err := adminOrg.client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + role := &Role{ + Role: &types.Role{}, + client: adminOrg.client, + TenantContext: tenantContext, + } + + err = adminOrg.client.OpenApiGetItem(minimumApiVersion, urlRef, nil, role.Role, getTenantContextHeader(tenantContext)) + if err != nil { + return nil, err + } + + return role, nil +} + +// GetRoleByName retrieves role by given name +func (adminOrg *AdminOrg) GetRoleByName(name string) (*Role, error) { + queryParams := url.Values{} + queryParams.Add("filter", "name=="+name) + roles, err := adminOrg.GetAllRoles(queryParams) + if err != nil { + return nil, err + } + if len(roles) == 0 { + return nil, ErrorEntityNotFound + } + if len(roles) > 1 { + return nil, fmt.Errorf("more than one role found with name '%s'", name) + } + return roles[0], nil +} + +// getAllRoles retrieves all roles using OpenAPI endpoint. Query parameters can be supplied to perform additional +// filtering +func getAllRoles(client *Client, queryParameters url.Values, additionalHeader map[string]string) ([]*Role, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponses := []*types.Role{{}} + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &typeResponses, additionalHeader) + if err != nil { + return nil, err + } + + // Wrap all typeResponses into Role types with client + returnRoles := make([]*Role, len(typeResponses)) + for sliceIndex := range typeResponses { + returnRoles[sliceIndex] = &Role{ + Role: typeResponses[sliceIndex], + client: client, + TenantContext: getTenantContextFromHeader(additionalHeader), + } + } + + return returnRoles, nil +} + +// GetAllRoles retrieves all roles as tenant user. Query parameters can be supplied to perform additional +// filtering +func (adminOrg *AdminOrg) GetAllRoles(queryParameters url.Values) ([]*Role, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return getAllRoles(adminOrg.client, queryParameters, getTenantContextHeader(tenantContext)) +} + +// GetAllRoles retrieves all roles as System administrator. 
Query parameters can be supplied to perform additional +// filtering +func (client *Client) GetAllRoles(queryParameters url.Values) ([]*Role, error) { + return getAllRoles(client, queryParameters, nil) +} + +// CreateRole creates a new role as a tenant administrator +func (adminOrg *AdminOrg) CreateRole(newRole *types.Role) (*Role, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + minimumApiVersion, err := adminOrg.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if newRole.BundleKey == "" { + newRole.BundleKey = types.VcloudUndefinedKey + } + + urlRef, err := adminOrg.client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + returnRole := &Role{ + Role: &types.Role{}, + client: adminOrg.client, + TenantContext: tenantContext, + } + + err = adminOrg.client.OpenApiPostItem(minimumApiVersion, urlRef, nil, newRole, returnRole.Role, getTenantContextHeader(tenantContext)) + if err != nil { + return nil, fmt.Errorf("error creating role: %s", err) + } + + return returnRole, nil +} + +// Update updates existing OpenAPI role +func (role *Role) Update() (*Role, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + minimumApiVersion, err := role.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if role.Role.ID == "" { + return nil, fmt.Errorf("cannot update role without id") + } + + urlRef, err := role.client.OpenApiBuildEndpoint(endpoint, role.Role.ID) + if err != nil { + return nil, err + } + + returnRole := &Role{ + Role: &types.Role{}, + client: role.client, + TenantContext: role.TenantContext, + } + + err = role.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, role.Role, returnRole.Role, getTenantContextHeader(role.TenantContext)) + if err != nil { + return nil, fmt.Errorf("error updating role: %s", err) + } + + return returnRole, nil +} + +// Delete deletes OpenAPI role +func (role *Role) Delete() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + minimumApiVersion, err := role.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if role.Role.ID == "" { + return fmt.Errorf("cannot delete role without id") + } + + urlRef, err := role.client.OpenApiBuildEndpoint(endpoint, role.Role.ID) + if err != nil { + return err + } + + err = role.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, getTenantContextHeader(role.TenantContext)) + + if err != nil { + return fmt.Errorf("error deleting role: %s", err) + } + + return nil +} + +// AddRights adds a collection of rights to a role +func (role *Role) AddRights(newRights []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + return addRightsToRole(role.client, "Role", role.Role.Name, role.Role.ID, endpoint, newRights, getTenantContextHeader(role.TenantContext)) +} + +// UpdateRights replaces existing rights with the given collection of rights +func (role *Role) UpdateRights(newRights []types.OpenApiReference) error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + return updateRightsInRole(role.client, "Role", role.Role.Name, role.Role.ID, endpoint, newRights, getTenantContextHeader(role.TenantContext)) +} + +// RemoveRights removes specific rights from a role +func (role *Role) RemoveRights(removeRights []types.OpenApiReference) 
error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + return removeRightsFromRole(role.client, "Role", role.Role.Name, role.Role.ID, endpoint, removeRights, getTenantContextHeader(role.TenantContext)) +} + +// RemoveAllRights removes all rights from a role +func (role *Role) RemoveAllRights() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointRoles + return removeAllRightsFromRole(role.client, "Role", role.Role.Name, role.Role.ID, endpoint, getTenantContextHeader(role.TenantContext)) +} + +// addRightsToRole is a generic function that can add rights to a rights collection (Role, Global Role, or Rights bundle) +// roleType is an informative string (one of "Role", "GlobalRole", or "RightsBundle") +// name and id are the name and ID of the collection +// endpoint is the API endpoint used as a basis for the POST operation +// newRights is a collection of rights (ID+name) to be added +// Note: the API call ignores duplicate rights. If the rights to be added already exist, the call succeeds +// but no changes are recorded +func addRightsToRole(client *Client, roleType, name, id, endpoint string, newRights []types.OpenApiReference, additionalHeader map[string]string) error { + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if id == "" { + return fmt.Errorf("cannot update %s without id", roleType) + } + if name == "" { + return fmt.Errorf("empty name given for %s %s", roleType, id) + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id, "/rights") + if err != nil { + return err + } + + var input types.OpenApiItems + + for _, right := range newRights { + input.Values = append(input.Values, types.OpenApiReference{ + Name: right.Name, + ID: right.ID, + }) + } + var pages types.OpenApiPages + + err = client.OpenApiPostItem(minimumApiVersion, urlRef, nil, &input, &pages, additionalHeader) + + if err != nil { + return fmt.Errorf("error adding rights to %s %s: %s", roleType, name, err) + } + + return nil +} + +// updateRightsInRole is a generic function that can change rights in a Role or Global Role +// roleType is an informative string (either "Role" or "GlobalRole") +// name and id are the name and ID of the role +// endpoint is the API endpoint used as a basis for the PUT operation +// newRights is a collection of rights (ID+name) to be added +func updateRightsInRole(client *Client, roleType, name, id, endpoint string, newRights []types.OpenApiReference, additionalHeader map[string]string) error { + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if id == "" { + return fmt.Errorf("cannot update %s without id", roleType) + } + if name == "" { + return fmt.Errorf("empty name given for %s %s", roleType, id) + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id, "/rights") + if err != nil { + return err + } + + var input = types.OpenApiItems{ + Values: []types.OpenApiReference{}, + } + + for _, right := range newRights { + input.Values = append(input.Values, types.OpenApiReference{ + Name: right.Name, + ID: right.ID, + }) + } + var pages types.OpenApiPages + + err = client.OpenApiPutItem(minimumApiVersion, urlRef, nil, &input, &pages, additionalHeader) + + if err != nil { + return fmt.Errorf("error updating rights in %s %s: %s", roleType, name, err) + } + + return nil +} + +// removeRightsFromRole is a generic function that can remove rights from a Role or Global Role +// roleType is an informative 
string (either "Role" or "GlobalRole") +// name and id are the name and ID of the role +// endpoint is the API endpoint used as a basis for the PUT operation +// removeRights is a collection of rights (ID+name) to be removed +func removeRightsFromRole(client *Client, roleType, name, id, endpoint string, removeRights []types.OpenApiReference, additionalHeader map[string]string) error { + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if id == "" { + return fmt.Errorf("cannot update %s without id", roleType) + } + if name == "" { + return fmt.Errorf("empty name given for %s %s", roleType, id) + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id, "/rights") + if err != nil { + return err + } + + var input = types.OpenApiItems{ + Values: []types.OpenApiReference{}, + } + var pages types.OpenApiPages + + currentRights, err := getRights(client, id, endpoint, nil, additionalHeader) + if err != nil { + return err + } + + var foundToRemove = make(map[string]bool) + + // Set the items to be removed as not found by default + for _, rr := range removeRights { + foundToRemove[rr.Name] = false + } + + // Search the current rights for items to delete + for _, cr := range currentRights { + for _, rr := range removeRights { + if cr.ID == rr.ID { + foundToRemove[cr.Name] = true + } + } + } + + for _, cr := range currentRights { + _, found := foundToRemove[cr.Name] + if !found { + input.Values = append(input.Values, types.OpenApiReference{Name: cr.Name, ID: cr.ID}) + } + } + + // Check that all the items to be removed were found in the current rights list + notFoundNames := "" + for name, found := range foundToRemove { + if !found { + if notFoundNames != "" { + notFoundNames += ", " + } + notFoundNames += `"` + name + `"` + } + } + + if notFoundNames != "" { + return fmt.Errorf("rights in %s %s not found for deletion: [%s]", roleType, name, notFoundNames) + } + + err = client.OpenApiPutItem(minimumApiVersion, urlRef, nil, &input, &pages, additionalHeader) + + if err != nil { + return fmt.Errorf("error updating rights in %s %s: %s", roleType, name, err) + } + + return nil +} + +// removeAllRightsFromRole removes all rights from the given role +func removeAllRightsFromRole(client *Client, roleType, name, id, endpoint string, additionalHeader map[string]string) error { + return updateRightsInRole(client, roleType, name, id, endpoint, []types.OpenApiReference{}, additionalHeader) +} + +// FindMissingImpliedRights returns a list of the rights that are implied in the rights provided as input +func FindMissingImpliedRights(client *Client, rights []types.OpenApiReference) ([]types.OpenApiReference, error) { + var ( + impliedRights []types.OpenApiReference + uniqueInputRights = make(map[string]types.OpenApiReference) + uniqueImpliedRights = make(map[string]types.OpenApiReference) + ) + + // Make a searchable collection of unique rights from the input + // This operation removes duplicates from the list + for _, right := range rights { + uniqueInputRights[right.Name] = right + } + + // Find the implied rights + for _, right := range rights { + fullRight, err := client.GetRightByName(right.Name) + if err != nil { + return nil, err + } + for _, ir := range fullRight.ImpliedRights { + _, seenAsInput := uniqueInputRights[ir.Name] + _, seenAsImplied := uniqueImpliedRights[ir.Name] + // If the right has already been added either as explicit ro as implied right, we skip it + if seenAsInput || seenAsImplied { + continue + } + // Add to the unique 
collection of implied rights + uniqueImpliedRights[ir.Name] = types.OpenApiReference{ + Name: ir.Name, + ID: ir.ID, + } + } + } + + // Create the output list from the implied rights collection + if len(uniqueImpliedRights) > 0 { + for _, right := range uniqueImpliedRights { + impliedRights = append(impliedRights, right) + } + } + + return impliedRights, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/saml_auth.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/saml_auth.go new file mode 100644 index 000000000..09e513944 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/saml_auth.go @@ -0,0 +1,321 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +/* +This file implements SAML authentication flow using Microsoft Active Directory Federation Services +(ADFS). It adds support to authenticate to Cloud Director using SAML authentication (by applying +WithSamlAdfs() configuration option to NewVCDClient function). The identity provider (IdP) must be +Active Directory Federation Services (ADFS) and "/adfs/services/trust/13/usernamemixed" endpoint +must be enabled to make it work. Furthermore username must be supplied in ADFS friendly format - +test@contoso.com' or 'contoso.com\test'. + +It works by finding ADFS login endpoint for vCD by querying vCD SAML redirect endpoint +for specific Org and then submits authentication request to "/adfs/services/trust/13/usernamemixed" +endpoint of ADFS server. Using ADFS response it constructs a SIGN token which vCD accepts for the +"/api/sessions". After first initial "login" it grabs the regular X-Vcloud-Authorization token and +uses it for further requests. +More information in vCD documentation: +https://code.vmware.com/docs/10000/vcloud-api-programming-guide-for-service-providers/GUID-335CFC35-7AD8-40E5-91BE-53971937A2BB.html + +There is a working code example in /samples/saml_auth_adfs directory how to setup client using SAML +auth. +*/ + +// authorizeSamlAdfs is the main entry point for SAML authentication on ADFS endpoint +// "/adfs/services/trust/13/usernamemixed" +// Input parameters: +// user - username for authentication to ADFS server (e.g. 'test@contoso.com' or +// 'contoso.com\test') +// pass - password for authentication to ADFS server +// org - Org to authenticate to +// override_rpt_id - override relaying party trust ID. If it is empty - vCD Entity ID will be used +// as relaying party trust ID +// +// The general concept is to get a SIGN token from ADFS IdP (Identity Provider) and exchange it with +// regular vCD token for further operations. 
It is documented in +// https://code.vmware.com/docs/10000/vcloud-api-programming-guide-for-service-providers/GUID-335CFC35-7AD8-40E5-91BE-53971937A2BB.html +// This is achieved with the following steps: +// 1 - Lookup vCD Entity ID to use for ADFS authentication or use custom value if overrideRptId +// field is provided +// 2 - Find ADFS server name by querying vCD SAML URL which responds with HTTP redirect (302) +// 3 - Authenticate to ADFS server using vCD SAML Entity ID or custom value if overrideRptId is +// specified Relying Party Trust Identifier +// 4 - Process received ciphers from ADFS server (gzip and base64 encode) so that data can be used +// as SIGN token in vCD +// 5 - Authenticate to vCD using SIGN token in order to receive back regular +// X-Vcloud-Authorization token +// 6 - Set the received X-Vcloud-Authorization for further usage +func (vcdClient *VCDClient) authorizeSamlAdfs(user, pass, org, overrideRptId string) error { + // Step 1 - find SAML entity ID configured in vCD metadata URL unless overrideRptId is provided + // Example URL: url.Scheme + "://" + url.Host + "/cloud/org/" + org + "/saml/metadata/alias/vcd" + samlEntityId := overrideRptId + var err error + if overrideRptId == "" { + samlEntityId, err = getSamlEntityId(vcdClient, org) + if err != nil { + return fmt.Errorf("SAML - error getting vCD SAML Entity ID: %s", err) + } + } + + // Step 2 - find ADFS server used for SAML by calling vCD SAML endpoint and hoping for a + // redirect to ADFS server. Example URL: + // url.Scheme + "://" + url.Host + "/login/my-org/saml/login/alias/vcd?service=tenant:" + org + adfsAuthEndPoint, err := getSamlAdfsServer(vcdClient, org) + if err != nil { + return fmt.Errorf("SAML - error getting IdP (ADFS): %s", err) + } + + // Step 3 - authenticate to ADFS to receive SIGN token which can be used for vCD authentication + signToken, err := getSamlAuthToken(vcdClient, user, pass, samlEntityId, adfsAuthEndPoint, org) + if err != nil { + return fmt.Errorf("SAML - could not get auth token from IdP (ADFS). Did you specify "+ + "username in ADFS format ('user@contoso.com' or 'contoso.com\\user')? : %s", err) + } + + // Step 4 - gzip and base64 encode SIGN token so that vCD can understand it + base64GzippedSignToken, err := gzipAndBase64Encode(signToken) + if err != nil { + return fmt.Errorf("SAML - error encoding SIGN token: %s", err) + } + util.Logger.Printf("[DEBUG] SAML got SIGN token from IdP '%s' for entity with ID '%s'", + adfsAuthEndPoint, samlEntityId) + + // Step 5 - authenticate to vCD with SIGN token and receive vCD regular token in exchange + accessToken, err := authorizeSignToken(vcdClient, base64GzippedSignToken, org) + if err != nil { + return fmt.Errorf("SAML - error submitting SIGN token to vCD: %s", err) + } + + // Step 6 - set regular vCD auth token X-Vcloud-Authorization + err = vcdClient.SetToken(org, AuthorizationHeader, accessToken) + if err != nil { + return fmt.Errorf("error during token-based authentication: %s", err) + } + + return nil +} + +// getSamlAdfsServer finds out Active Directory Federation Service (ADFS) server to use +// for SAML authentication +// It works by temporarily patching existing http.Client behavior to avoid automatically +// following HTTP redirects and searches for Location header after the request to vCD SAML redirect +// address. The URL to search redirect location is: +// url.Scheme + "://" + url.Host + "/login/my-org/saml/login/alias/vcd?service=tenant:" + org +// +// Concurrency note. 
This function temporarily patches `vcdCli.Client.Http` therefore http.Client +// would not follow redirects during this time. It is however safe as vCDClient is not expected to +// use `http.Client` in any other place before authentication occurs. +func getSamlAdfsServer(vcdCli *VCDClient, org string) (string, error) { + url := vcdCli.Client.VCDHREF + + // Backup existing http.Client redirect behavior so that it does not follow HTTP redirects + // automatically and restore it right after this function by using defer. A new http.Client + // could be spawned here, but the existing one is re-used on purpose to inherit all other + // settings used for client (timeouts, etc). + backupRedirectChecker := vcdCli.Client.Http.CheckRedirect + + defer func() { + vcdCli.Client.Http.CheckRedirect = backupRedirectChecker + }() + + // Patch http client to avoid following redirects + vcdCli.Client.Http.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + + // Construct SAML login URL which should return a redirect to ADFS server + loginURLString := url.Scheme + "://" + url.Host + "/login/" + org + "/saml/login/alias/vcd" + loginURL, err := url.Parse(loginURLString) + if err != nil { + return "", fmt.Errorf("unable to parse login URL '%s': %s", loginURLString, err) + } + util.Logger.Printf("[DEBUG] SAML looking up IdP (ADFS) host redirect in: %s", loginURL.String()) + + // Make a request to URL adding unencoded query parameters in the format: + // "?service=tenant:my-org" + req := vcdCli.Client.NewRequestWitNotEncodedParams( + nil, map[string]string{"service": "tenant:" + org}, http.MethodGet, *loginURL, nil) + httpResponse, err := checkResp(vcdCli.Client.Http.Do(req)) + if err != nil { + return "", fmt.Errorf("SAML - ADFS server query failed: %s", err) + } + + err = decodeBody(types.BodyTypeXML, httpResponse, nil) + if err != nil { + return "", fmt.Errorf("SAML - error decoding body: %s", err) + } + + // httpResponse.Location() returns an error if no 'Location' header is present + adfsEndpoint, err := httpResponse.Location() + if err != nil { + return "", fmt.Errorf("SAML GET request for '%s' did not return HTTP redirect. "+ + "Is SAML configured? Got error: %s", loginURL, err) + } + + authEndPoint := adfsEndpoint.Scheme + "://" + adfsEndpoint.Host + "/adfs/services/trust/13/usernamemixed" + util.Logger.Printf("[DEBUG] SAML got IdP login endpoint: %s", authEndPoint) + + return authEndPoint, nil +} + +// getSamlEntityId attempts to load vCD hosted SAML metadata from URL: +// url.Scheme + "://" + url.Host + "/cloud/org/" + org + "/saml/metadata/alias/vcd" +// Returns an error if Entity ID is empty +// Sample response body can be found in saml_auth_unit_test.go +func getSamlEntityId(vcdCli *VCDClient, org string) (string, error) { + url := vcdCli.Client.VCDHREF + samlMetadataUrl := url.Scheme + "://" + url.Host + "/cloud/org/" + org + "/saml/metadata/alias/vcd" + + metadata := types.VcdSamlMetadata{} + errString := fmt.Sprintf("SAML - unable to load metadata from URL %s: %%s", samlMetadataUrl) + _, err := vcdCli.Client.ExecuteRequest(samlMetadataUrl, http.MethodGet, "", errString, nil, &metadata) + if err != nil { + return "", err + } + + samlEntityId := metadata.EntityID + util.Logger.Printf("[DEBUG] SAML got entity ID: %s", samlEntityId) + + if samlEntityId == "" { + return "", errors.New("SAML - got empty entity ID") + } + + return samlEntityId, nil +} + +// getSamlAuthToken generates a token request payload using function +// getSamlTokenRequestBody. 
This request is submitted to ADFS server endpoint +// "/adfs/services/trust/13/usernamemixed" and `RequestedSecurityTokenTxt` is expected in response +// Sample response body can be found in saml_auth_unit_test.go +func getSamlAuthToken(vcdCli *VCDClient, user, pass, samlEntityId, authEndpoint, org string) (string, error) { + requestBody := getSamlTokenRequestBody(user, pass, samlEntityId, authEndpoint) + samlTokenRequestBody := strings.NewReader(requestBody) + tokenRequestResponse := types.AdfsAuthResponseEnvelope{} + + // Post to ADFS endpoint "/adfs/services/trust/13/usernamemixed" + authEndpointUrl, err := url.Parse(authEndpoint) + if err != nil { + return "", fmt.Errorf("SAML - error parsing authentication endpoint %s: %s", authEndpoint, err) + } + req := vcdCli.Client.NewRequest(nil, http.MethodPost, *authEndpointUrl, samlTokenRequestBody) + req.Header.Add("Content-Type", types.SoapXML) + resp, err := vcdCli.Client.Http.Do(req) + resp, err = checkRespWithErrType(types.BodyTypeXML, resp, err, &types.AdfsAuthErrorEnvelope{}) + if err != nil { + return "", fmt.Errorf("SAML - ADFS token request query failed for RPT ID ('%s'): %s", + samlEntityId, err) + } + + err = decodeBody(types.BodyTypeXML, resp, &tokenRequestResponse) + if err != nil { + return "", fmt.Errorf("SAML - error decoding ADFS token request response: %s", err) + } + + tokenString := tokenRequestResponse.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse.RequestedSecurityTokenTxt.Text + + return tokenString, nil +} + +// authorizeSignToken submits a SIGN token received from ADFS server and gets regular vCD +// "X-Vcloud-Authorization" token in exchange +// Sample response body can be found in saml_auth_unit_test.go +func authorizeSignToken(vcdCli *VCDClient, base64GzippedSignToken, org string) (string, error) { + url, err := url.Parse(vcdCli.Client.VCDHREF.Scheme + "://" + vcdCli.Client.VCDHREF.Host + "/api/sessions") + if err != nil { + return "", fmt.Errorf("SAML error - could not parse URL for posting SIGN token: %s", err) + } + + signHeader := http.Header{} + signHeader.Add("Authorization", `SIGN token="`+base64GzippedSignToken+`",org="`+org+`"`) + + req := vcdCli.Client.newRequest(nil, nil, http.MethodPost, *url, nil, vcdCli.Client.APIVersion, signHeader) + resp, err := checkResp(vcdCli.Client.Http.Do(req)) + if err != nil { + return "", fmt.Errorf("SAML - error submitting SIGN token for authentication to %s: %s", req.URL.String(), err) + } + err = decodeBody(types.BodyTypeXML, resp, nil) + if err != nil { + return "", fmt.Errorf("SAML - error decoding body SIGN token auth response: %s", err) + } + + accessToken := resp.Header.Get("X-Vcloud-Authorization") + util.Logger.Printf("[DEBUG] SAML - setting access token for further requests") + return accessToken, nil +} + +// getSamlTokenRequestBody returns a SAML Token request body which is accepted by ADFS server +// endpoint "/adfs/services/trust/13/usernamemixed". 
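+// The values interpolated into the returned template are the ADFS endpoint address, a
+// creation/expiry timestamp pair (now and now+1 minute), the plain username and password, and the
+// relying party trust reference (samlEntityIdReference).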
+// The payload is not configured as a struct and unmarshalled because Go's unmarshalling changes +// structure so that ADFS does not accept the payload +func getSamlTokenRequestBody(user, password, samlEntityIdReference, adfsAuthEndpoint string) string { + return ` + + http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue + + http://www.w3.org/2005/08/addressing/anonymous + + ` + adfsAuthEndpoint + ` + + + ` + time.Now().Format(time.RFC3339) + ` + ` + time.Now().Add(1*time.Minute).Format(time.RFC3339) + ` + + + ` + user + ` + ` + password + ` + + + + + + + + ` + samlEntityIdReference + ` + + + 0 + http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer + + http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue + http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.1#SAMLV2.0 + + +` +} + +// gzipAndBase64Encode accepts a string, gzips it and encodes in base64 +func gzipAndBase64Encode(text string) (string, error) { + var gzipBuffer bytes.Buffer + gz := gzip.NewWriter(&gzipBuffer) + if _, err := gz.Write([]byte(text)); err != nil { + return "", fmt.Errorf("error writing to gzip buffer: %s", err) + } + if err := gz.Close(); err != nil { + return "", fmt.Errorf("error closing gzip buffer: %s", err) + } + base64GzippedToken := base64.StdEncoding.EncodeToString(gzipBuffer.Bytes()) + + return base64GzippedToken, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/sample_govcd_test_config.yaml b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/sample_govcd_test_config.yaml new file mode 100644 index 000000000..51946162d --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/sample_govcd_test_config.yaml @@ -0,0 +1,192 @@ +# COPY THIS FILE to govcd_test_config.yaml +# in the same directory and change the values +# to match your environment. +# +# All items in this file must exist already +# (They will not be removed or left altered) +# The test will create a vApp and remove it at the end +# +provider: + # vCD administrator credentials + # (Providing org credentials will skip some tests) + user: someuser + password: somepassword + # If token is provided, username and password are ignored + token: an_auth_token + # If useSamlAdfs is true - client will try to authenticate against ADFS using SAML. + useSamlAdfs: false + # customAdfsRptId allows to specify custom Relaying Party Trust Identifier. By default client + # will use vCD SAML Entity ID + # customAdfsRptId: "" + + # The 3 fields below allow to set SAML credentials for tests that specifically use it. + # May be useful when local user credentials are used by default. Such credentials will + # authenticate to Org specified in vcd.org parameter. + # samlUser: test@test-forest.net + # samlPassword: XXX + # samlCustomRptId: "my-optional-custom-relaying-party-trust-id" + # + # The vCD address, in the format https://vCD_IP/api + # or https://vCD_host_name/api + url: https://11.111.1.111/api + # + # The organization you are authenticating with + sysOrg: System + # (Optional) MaxRetryTimeout specifies a time limit (in seconds) for retrying requests made by the SDK + # where vCloud director may take time to respond and retry mechanism is needed. + # This must be >0 to avoid instant timeout errors. If omitted - default value is set. + # maxRetryTimeout: 60 + # + # (Optional) httpTimeout specifies a time limit (in seconds) for waiting http response. + # If omitted - default value is set. 
+ # httpTimeout: 600 +vcd: + # Name of the organization (mandatory) + org: myorg + # + # The virtual data center (mandatory) + # The tests will create a vApp here + # + vdc: myvdc + # Provider VDC; if omitted, some tests will be skipped + provider_vdc: + name: myprovidervdc + storage_profile: mystorageprofile + network_pool: mynetworkpool + # Provider VDC details for NSX-T testing + nsxt_provider_vdc: + name: nsxTPvdc1 + storage_profile: "*" + network_pool: "NSX-T Overlay 1" + nsxt: + # NSX-T manager name to be used as defined in VCD + manager: nsxManager1 + # NSX-T tier-0 router used for external network tests + tier0router: tier-0-router + # NSX-T tier-0 VRF router used for external network tests + tier0routerVrf: tier-0-router-vrf + # Existing External Network with correct configuration + externalNetwork: tier0-backed-external-network + # Existing NSX-T based VDC + vdc: nsxt-vdc-name + # Existing NSX-T edge gateway + edgeGateway: nsxt-gw-name + # Existing NSX-T segment to test NSX-T Imported Org Vdc network + nsxtImportSegment: vcd-org-vdc-imported-network-backing + # An Org catalog, possibly containing at least one item + catalog: + name: mycat + # One item in the catalog. It will be used to compose test vApps. Some tests rely on it + # being Photon OS. If it is not Photon OS - some tests will be skipped + catalogItem: myitem + # + # An optional description for the catalog. Its test will be skipped if omitted. + # If provided, it must be the current description of the catalog + description: mycat for loading + # + # An optional description for the catalog item + catalogItemDescription: myitem to create vapps + # + # Item in the catalog. If available it will be used to test vApp with multi VMs and `ovaMultiVmPath` will be ignored + catalogItemWithMultiVms: my item with multi VMs + # Name of VM in `catalogItemWithMultiVms` template or in `ovaMultiVmPath` if `catalogItemWithMultiVms` isn't provided. Default vmName `thirdVM` in default OVA. + vmNameInMultiVmItem: thirdVM + # Existing VDC networks. At least one is needed. + network: + # First vdc network (mandatory) + network1: "mynet" + # Second vdc network. If omitted, some tests will be skipped. + network2: "mynet2" + # Storage profiles used in the vDC + # One or two can be listed + storageProfile: + # First storage profile (mandatory) + storageProfile1: Development + # Second storage profile. If omitted, some tests will be skipped. + storageProfile2: "*" + # An edge gateway + # (see https://pubs.vmware.com/vca/topic/com.vmware.vcloud.api.doc_56/GUID-18B0FB8B-385C-4B6D-982C-4B24D271C646.html) + edgeGateway: myedgegw + # + # The IP of the gateway (must exist) + externalIp: 10.150.10.10 + # + # netmask for the external IP (MANDATORY for edge GW VPN) + externalNetmask: 255.255.224.0 + # + # A free IP in the Org vDC network + internalIp: 192.168.1.10 + # + # netmask for the internal IP (MANDATORY for edge GW VPN) + internalNetmask: 255.255.255.0 + # + # An existing external Network name + externalNetwork: myexternalnet + # + # A port group name for creating a new external network in tests + externalNetworkPortGroup: ForTestingPG + # + # A port group type for creating a new external network in tests. Can be DV_PORTGROUP or NETWORK + externalNetworkPortGroupType: NETWORK + # + # A vSphere server name for creating an external network + vimServer: vc9 + # +logging: + # All items in this section are optional + # Logging is disabled by default. 
+ # See ./util/LOGGING.md for more info + # + # Enables or disables logs + enabled: true + # + # changes the log name + logFileName: "go-vcloud-director.log" + # + # Defines whether we log the requests in HTTP operations + logHttpRequests: true + # + # Defines whether we log the responses in HTTP operations + logHttpResponses: true + # + # Comma-separated list of XML tags to skip from the API logs + skipResponseTags: SupportedVersions,VAppTemplate + # + # Comma-separated list of functions from where we log the API calls. + # When this is set, we only log API requests and responses if the name + # of the function matches any of the names in this list. + logFunctions: FindVAppByName,GetAdminOrgByName + # + # Shows details of cleanup operations after tests + verboseCleanup: true +ova: + # The ova for uploading catalog item for tests. + # Default paths are simple ova provided by project + # Empty values skips the tests + # Absolute or relative path + ovaPath: ../test-resources/test_vapp_template.ova + # + # The chunked ova (vmdk inside are split) for tests + ovaChunkedPath: ../test-resources/template_with_custom_chunk_size.ova + # + # The ova with multi VMs for tests. + ovaMultiVmPath: ../test-resources/vapp_with_3_vms.ova + # + # The ova with no VMDK size in ovf for tests. + ovaWithoutSizePath: ../test-resources/template_without_vmdk_size.ova + # + # The ovf for uploading catalog item for tests. + ovfPath: ../test-resources/test_vapp_template_ovf/descriptor.ovf +media: + # The iso for uploading media item for tests. + # Default paths are simple iso provided by project + # Empty values skips the tests + # Absolute or relative path + mediaPath: ../test-resources/test.iso + # Existing media in test system + mediaName: uploadedMediaName +misc: + # By default tests in this repository pick LDAP container 'rroemhild/test-openldap'. As docker throttles downloads + # it can help to host the image on local registry and pull it from there. This variable overrides default container + # location when set. + #ldapContainer: custom-registry.yyy/directory/test-openldap:latest diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/session_info.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/session_info.go new file mode 100644 index 000000000..9eebb2c97 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/session_info.go @@ -0,0 +1,129 @@ +package govcd + +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// ExtendedSessionInfo collects data regarding a VCD connection +type ExtendedSessionInfo struct { + User string + Org string + Roles []string + Rights []string + Version string + ConnectionType string +} + +// GetSessionInfo collects the basic session information for a VCD connection +func (client *Client) GetSessionInfo() (*types.CurrentSessionInfo, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointSessionCurrent + + // We get the maximum supported version, as early versions of the API return less data + apiVersion, err := client.MaxSupportedVersion() + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + var info types.CurrentSessionInfo + + err = client.OpenApiGetItem(apiVersion, urlRef, nil, &info, nil) + if err != nil { + return nil, err + } + + return &info, nil +} + +// GetExtendedSessionInfo collects extended session information for support and debugging +// It will try to collect as much data as possible, failing only if the minimum data can't +// be collected. +func (vcdClient *VCDClient) GetExtendedSessionInfo() (*ExtendedSessionInfo, error) { + var extendedSessionInfo ExtendedSessionInfo + sessionInfo, err := vcdClient.Client.GetSessionInfo() + if err != nil { + return nil, err + } + switch { + case vcdClient.Client.UsingBearerToken: + extendedSessionInfo.ConnectionType = "Bearer token" + case vcdClient.Client.UsingAccessToken: + extendedSessionInfo.ConnectionType = "API Access token" + default: + extendedSessionInfo.ConnectionType = "Username + password" + } + version, err := vcdClient.Client.GetVcdFullVersion() + if err == nil { + extendedSessionInfo.Version = version.Version.String() + } + if sessionInfo.User.Name == "" { + return nil, fmt.Errorf("no user reference found") + } + extendedSessionInfo.User = sessionInfo.User.Name + + if sessionInfo.Org.Name == "" { + return nil, fmt.Errorf("no Org reference found") + } + extendedSessionInfo.Org = sessionInfo.Org.Name + + if len(sessionInfo.Roles) == 0 { + return &extendedSessionInfo, nil + } + extendedSessionInfo.Roles = append(extendedSessionInfo.Roles, sessionInfo.Roles...) 
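+ // Resolve each role reference into its rights through the admin view of the
+ // session's organization. If the org lookup fails, the data collected so far
+ // is returned together with the error; failures on individual role or rights
+ // lookups are skipped.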
+ org, err := vcdClient.GetAdminOrgById(sessionInfo.Org.ID) + if err != nil { + return &extendedSessionInfo, err + } + for _, roleRef := range sessionInfo.RoleRefs { + role, err := org.GetRoleById(roleRef.ID) + if err != nil { + continue + } + rights, err := role.GetRights(nil) + if err != nil { + continue + } + for _, right := range rights { + extendedSessionInfo.Rights = append(extendedSessionInfo.Rights, right.Name) + } + } + return &extendedSessionInfo, nil +} + +// LogSessionInfo prints session information into the default logs +func (client *VCDClient) LogSessionInfo() { + + // If logging is disabled, there is no point in collecting session info + if util.EnableLogging { + info, err := client.GetExtendedSessionInfo() + if err != nil { + util.Logger.Printf("no session info collected: %s\n", err) + return + } + text, err := json.MarshalIndent(info, " ", " ") + if err != nil { + util.Logger.Printf("error formatting session info %s\n", err) + return + } + util.Logger.Println(strings.Repeat("*", 80)) + util.Logger.Println("START SESSION INFO") + util.Logger.Println(strings.Repeat("*", 80)) + util.Logger.Printf("%s\n", text) + util.Logger.Println(strings.Repeat("*", 80)) + util.Logger.Println("END SESSION INFO") + util.Logger.Println(strings.Repeat("*", 80)) + } +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/system.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/system.go new file mode 100644 index 000000000..f4426abfa --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/system.go @@ -0,0 +1,1116 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "encoding/xml" + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// Simple structure to pass Edge Gateway creation parameters. +type EdgeGatewayCreation struct { + ExternalNetworks []string // List of external networks to be linked to this gateway + DefaultGateway string // Which network should be used as default gateway (empty name = no default gateway) + OrgName string // parent Org + VdcName string // parent VDC + Name string // edge gateway name + Description string // Optional description + BackingConfiguration string // Type of backing configuration (compact, full) + AdvancedNetworkingEnabled bool // enable advanced gateway + HAEnabled bool // enable HA + UseDefaultRouteForDNSRelay bool // True if the default gateway should be used as the DNS relay + DistributedRoutingEnabled bool // If advanced networking enabled, also enable distributed routing +} + +// Creates an Admin Organization based on settings, description, and org name. +// The Organization created will have these settings specified in the +// settings parameter. The settings variable is defined in types.go. +// Method will fail unless user has an admin token. +// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/POST-CreateOrganization.html +// Organization creation in vCD has two bugs BZ 2177355, BZ 2228936 (fixes are in 9.1.0.3 and 9.5.0.2) which require +// organization settings to be provided as workarounds. +// At least one element among DelayAfterPowerOnSeconds, DeployedVMQuota, StoredVmQuota, UseServerBootSequence, getVdcQuota +// should be set when providing generalOrgSettings. 
+// If either VAppLeaseSettings or VAppTemplateLeaseSettings is provided then all elements need to have values, otherwise don't provide them at all. +// Overall elements must be in the correct order. +func CreateOrg(vcdClient *VCDClient, name string, fullName string, description string, settings *types.OrgSettings, isEnabled bool) (Task, error) { + vcomp := &types.AdminOrg{ + Xmlns: types.XMLNamespaceVCloud, + Name: name, + IsEnabled: isEnabled, + FullName: fullName, + Description: description, + OrgSettings: settings, + } + + // There is a bug in the settings of CanPublishCatalogs. + // If UseServerBootSequence is not set, CanPublishCatalogs is always false + // regardless of the value passed during creation. + if settings != nil { + if settings.OrgGeneralSettings != nil { + settings.OrgGeneralSettings.UseServerBootSequence = true + } + } + orgCreateHREF := vcdClient.Client.VCDHREF + orgCreateHREF.Path += "/admin/orgs" + + // Return the task + return vcdClient.Client.ExecuteTaskRequest(orgCreateHREF.String(), http.MethodPost, + "application/vnd.vmware.admin.organization+xml", "error instantiating a new Org: %s", vcomp) + +} + +// Returns the UUID part of an entity ID +// From "urn:vcloud:vdc:72fefde7-4fed-45b8-a774-79b72c870325", +// will return "72fefde7-4fed-45b8-a774-79b72c870325" +// From "urn:vcloud:catalog:97384890-180c-4563-b9b7-0dc50a2430b0" +// will return "97384890-180c-4563-b9b7-0dc50a2430b0" +func getBareEntityUuid(entityId string) (string, error) { + // Regular expression to match an ID: + // 3 strings (alphanumeric + "-") separated by a colon (:) + // 1 group of 8 hexadecimal digits + // 3 groups of 4 hexadecimal digits + // 1 group of 12 hexadecimal digits + reGetID := regexp.MustCompile(`^[\w-]+:[\w-]+:[\w-]+:([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})$`) + matchList := reGetID.FindAllStringSubmatch(entityId, -1) + + // matchList has the format + // [][]string{[]string{"TOTAL MATCHED STRING", "CAPTURED TEXT"}} + // such as + // [][]string{[]string{"urn:vcloud:catalog:97384890-180c-4563-b9b7-0dc50a2430b0", "97384890-180c-4563-b9b7-0dc50a2430b0"}} + if len(matchList) == 0 || len(matchList[0]) < 2 { + return "", fmt.Errorf("error extracting ID from '%s'", entityId) + } + return matchList[0][1], nil +} + +// CreateEdgeGatewayAsync creates an edge gateway using a simplified configuration structure +// https://code.vmware.com/apis/442/vcloud-director/doc/doc/operations/POST-CreateEdgeGateway.html +// +// Note. This function does not allow to pick exact subnet in external network to use for edge +// gateway. It will pick first one instead. 
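+//
+// A minimal usage sketch from a caller's point of view (illustrative only; it
+// assumes vcdClient is an already authenticated *VCDClient, and the org, VDC,
+// gateway, and network names below are assumptions, not values used anywhere
+// in this change):
+//
+//	task, err := govcd.CreateEdgeGatewayAsync(vcdClient, govcd.EdgeGatewayCreation{
+//		OrgName:              "my-org",
+//		VdcName:              "my-vdc",
+//		Name:                 "my-edge",
+//		BackingConfiguration: "compact",
+//		ExternalNetworks:     []string{"ext-net-1"},
+//		DefaultGateway:       "ext-net-1",
+//	})
+//	if err == nil {
+//		err = task.WaitTaskCompletion()
+//	}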
+func CreateEdgeGatewayAsync(vcdClient *VCDClient, egwc EdgeGatewayCreation) (Task, error) { + + distributed := egwc.DistributedRoutingEnabled + if !egwc.AdvancedNetworkingEnabled { + distributed = false + } + // This is the main configuration structure + egwConfiguration := &types.EdgeGateway{ + Xmlns: types.XMLNamespaceVCloud, + Name: egwc.Name, + Description: egwc.Description, + Configuration: &types.GatewayConfiguration{ + UseDefaultRouteForDNSRelay: &egwc.UseDefaultRouteForDNSRelay, + HaEnabled: &egwc.HAEnabled, + GatewayBackingConfig: egwc.BackingConfiguration, + AdvancedNetworkingEnabled: &egwc.AdvancedNetworkingEnabled, + DistributedRoutingEnabled: &distributed, + GatewayInterfaces: &types.GatewayInterfaces{ + GatewayInterface: []*types.GatewayInterface{}, + }, + EdgeGatewayServiceConfiguration: &types.GatewayFeatures{}, + }, + } + + if len(egwc.ExternalNetworks) == 0 { + return Task{}, fmt.Errorf("no external networks provided. At least one is needed") + } + + // If the user has indicated a default gateway, we make sure that it matches + // a name in the list of external networks + if egwc.DefaultGateway != "" { + defaultGatewayFound := false + for _, name := range egwc.ExternalNetworks { + if egwc.DefaultGateway == name { + defaultGatewayFound = true + } + } + if !defaultGatewayFound { + return Task{}, fmt.Errorf("default gateway (%s) selected, but its name is not among the external networks (%v)", egwc.DefaultGateway, egwc.ExternalNetworks) + } + } + // Add external networks inside the configuration structure + for _, extNetName := range egwc.ExternalNetworks { + extNet, err := vcdClient.GetExternalNetworkByName(extNetName) + if err != nil { + return Task{}, err + } + + // Populate the subnet participation only if default gateway was set + var subnetParticipation *types.SubnetParticipation + if egwc.DefaultGateway != "" && extNet.ExternalNetwork.Name == egwc.DefaultGateway { + for _, net := range extNet.ExternalNetwork.Configuration.IPScopes.IPScope { + if net.IsEnabled { + subnetParticipation = &types.SubnetParticipation{ + Gateway: net.Gateway, + Netmask: net.Netmask, + } + break + } + } + } + networkConf := &types.GatewayInterface{ + Name: extNet.ExternalNetwork.Name, + DisplayName: extNet.ExternalNetwork.Name, + InterfaceType: "uplink", + Network: &types.Reference{ + HREF: extNet.ExternalNetwork.HREF, + ID: extNet.ExternalNetwork.ID, + Type: "application/vnd.vmware.admin.network+xml", + Name: extNet.ExternalNetwork.Name, + }, + UseForDefaultRoute: egwc.DefaultGateway == extNet.ExternalNetwork.Name, + SubnetParticipation: []*types.SubnetParticipation{subnetParticipation}, + } + + egwConfiguration.Configuration.GatewayInterfaces.GatewayInterface = + append(egwConfiguration.Configuration.GatewayInterfaces.GatewayInterface, networkConf) + } + + // Once the configuration structure has been filled using the simplified data, we delegate + // the edge gateway creation to the main configuration function. 
+ return CreateAndConfigureEdgeGatewayAsync(vcdClient, egwc.OrgName, egwc.VdcName, egwc.Name, egwConfiguration) +} + +// CreateAndConfigureEdgeGatewayAsync creates an edge gateway using a full configuration structure +func CreateAndConfigureEdgeGatewayAsync(vcdClient *VCDClient, orgName, vdcName, egwName string, egwConfiguration *types.EdgeGateway) (Task, error) { + + if egwConfiguration.Name != egwName { + return Task{}, fmt.Errorf("name mismatch: '%s' used as parameter but '%s' in the configuration structure", egwName, egwConfiguration.Name) + } + + egwConfiguration.Xmlns = types.XMLNamespaceVCloud + + adminOrg, err := vcdClient.GetAdminOrgByName(orgName) + if err != nil { + return Task{}, err + } + vdc, err := adminOrg.GetVDCByName(vdcName, false) + if err != nil { + return Task{}, err + } + + egwCreateHREF := vcdClient.Client.VCDHREF + + vdcId, err := getBareEntityUuid(vdc.Vdc.ID) + if err != nil { + return Task{}, fmt.Errorf("error retrieving ID from Vdc %s: %s", vdcName, err) + } + if vdcId == "" { + return Task{}, fmt.Errorf("error retrieving ID from Vdc %s - empty ID returned", vdcName) + } + egwCreateHREF.Path += fmt.Sprintf("/admin/vdc/%s/edgeGateways", vdcId) + + // The first task is the creation task. It is quick, and does only create the vCD entity, + // but not yet deploy the underlying VM + creationTask, err := vcdClient.Client.ExecuteTaskRequest(egwCreateHREF.String(), http.MethodPost, + "application/vnd.vmware.admin.edgeGateway+xml", "error instantiating a new Edge Gateway: %s", egwConfiguration) + + if err != nil { + return Task{}, err + } + + err = creationTask.WaitTaskCompletion() + + if err != nil { + return Task{}, err + } + + // After creation, there is a build task that supervises the gateway deployment + for _, innerTask := range creationTask.Task.Tasks.Task { + if innerTask.OperationName == "networkEdgeGatewayCreate" { + deployTask := Task{ + Task: innerTask, + client: &vcdClient.Client, + } + return deployTask, nil + } + } + return Task{}, fmt.Errorf("no deployment task found for edge gateway %s - The edge gateway might have been created, but not deployed properly", egwName) +} + +// Private convenience function used by CreateAndConfigureEdgeGateway and CreateEdgeGateway to +// process the task and return the object that was created. +// It should not be invoked directly. +func createEdgeGateway(vcdClient *VCDClient, egwc EdgeGatewayCreation, egwConfiguration *types.EdgeGateway) (EdgeGateway, error) { + var task Task + var err error + if egwConfiguration != nil { + task, err = CreateAndConfigureEdgeGatewayAsync(vcdClient, egwc.OrgName, egwc.VdcName, egwc.Name, egwConfiguration) + } else { + task, err = CreateEdgeGatewayAsync(vcdClient, egwc) + } + + if err != nil { + return EdgeGateway{}, err + } + err = task.WaitTaskCompletion() + if err != nil { + return EdgeGateway{}, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + // The edge gateway is created. 
Now we retrieve it from the server + org, err := vcdClient.GetAdminOrgByName(egwc.OrgName) + if err != nil { + return EdgeGateway{}, err + } + vdc, err := org.GetVDCByName(egwc.VdcName, false) + if err != nil { + return EdgeGateway{}, err + } + egw, err := vdc.GetEdgeGatewayByName(egwc.Name, false) + if err != nil { + return EdgeGateway{}, err + } + return *egw, nil +} + +// CreateAndConfigureEdgeGateway creates an edge gateway using a full configuration structure +func CreateAndConfigureEdgeGateway(vcdClient *VCDClient, orgName, vdcName, egwName string, egwConfiguration *types.EdgeGateway) (EdgeGateway, error) { + return createEdgeGateway(vcdClient, EdgeGatewayCreation{OrgName: orgName, VdcName: vdcName, Name: egwName}, egwConfiguration) +} + +// CreateEdgeGateway creates an edge gateway using a simplified configuration structure +func CreateEdgeGateway(vcdClient *VCDClient, egwc EdgeGatewayCreation) (EdgeGateway, error) { + return createEdgeGateway(vcdClient, egwc, nil) +} + +func getOrgByHref(vcdClient *Client, href string) (*Org, error) { + org := NewOrg(vcdClient) + + _, err := vcdClient.ExecuteRequest(href, http.MethodGet, + "", "error retrieving org list: %s", nil, org.Org) + if err != nil { + return nil, err + } + + tenantContext, err := org.getTenantContext() + if err != nil { + return nil, err + } + org.TenantContext = tenantContext + + return org, nil +} + +func getAdminOrgByHref(vcdClient *Client, href string) (*AdminOrg, error) { + adminOrg := NewAdminOrg(vcdClient) + + _, err := vcdClient.ExecuteRequest(href, http.MethodGet, + "", "error retrieving org list: %s", nil, adminOrg.AdminOrg) + if err != nil { + return nil, err + } + + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + adminOrg.TenantContext = tenantContext + + return adminOrg, nil +} + +// If user specifies a valid organization name, then this returns a +// organization object. If no valid org is found, it returns an empty +// org and no error. Otherwise it returns an error and an empty +// Org object +// Deprecated: Use vcdClient.GetOrgByName instead +func GetOrgByName(vcdClient *VCDClient, orgName string) (Org, error) { + orgUrl, err := getOrgHREF(vcdClient, orgName) + if err != nil { + return Org{}, fmt.Errorf("organization '%s' fetch failed: %s", orgName, err) + } + org := NewOrg(&vcdClient.Client) + + _, err = vcdClient.Client.ExecuteRequest(orgUrl, http.MethodGet, + "", "error retrieving org list: %s", nil, org.Org) + if err != nil { + return Org{}, err + } + + return *org, nil +} + +// If user specifies valid organization name, +// then this returns an admin organization object. +// If no valid org is found, it returns an empty +// org and no error. Otherwise returns an empty AdminOrg +// and an error. 
+// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/GET-Organization-AdminView.html +// Deprecated: Use vcdClient.GetAdminOrgByName instead +func GetAdminOrgByName(vcdClient *VCDClient, orgName string) (AdminOrg, error) { + orgUrl, err := getOrgHREF(vcdClient, orgName) + if err != nil { + return AdminOrg{}, err + } + orgHREF := vcdClient.Client.VCDHREF + orgHREF.Path += "/admin/org/" + strings.Split(orgUrl, "/api/org/")[1] + + org := NewAdminOrg(&vcdClient.Client) + + _, err = vcdClient.Client.ExecuteRequest(orgHREF.String(), http.MethodGet, + "", "error retrieving org: %s", nil, org.AdminOrg) + if err != nil { + return AdminOrg{}, err + } + + return *org, nil +} + +// Returns the HREF of the org with the name orgName +func getOrgHREF(vcdClient *VCDClient, orgName string) (string, error) { + orgListHREF := vcdClient.Client.VCDHREF + orgListHREF.Path += "/org" + + orgList := new(types.OrgList) + + _, err := vcdClient.Client.ExecuteRequest(orgListHREF.String(), http.MethodGet, + "", "error retrieving org list: %s", nil, orgList) + if err != nil { + return "", err + } + + // Look for orgName within OrgList + for _, org := range orgList.Org { + if org.Name == orgName { + return org.HREF, nil + } + } + return "", fmt.Errorf("couldn't find org with name: %s. Please check Org name as it is case sensitive", orgName) +} + +// Returns the HREF of the org from the org ID +func getOrgHREFById(vcdClient *VCDClient, orgId string) (string, error) { + orgListHREF := vcdClient.Client.VCDHREF + orgListHREF.Path += "/org" + + orgList := new(types.OrgList) + + _, err := vcdClient.Client.ExecuteRequest(orgListHREF.String(), http.MethodGet, + "", "error retrieving org list: %s", nil, orgList) + if err != nil { + return "", err + } + + orgUuid, err := getBareEntityUuid(orgId) + if err != nil { + return "", err + } + // Look for org UUID within OrgList + for _, org := range orgList.Org { + // ID in orgList is usually empty. We extract the UUID from HREF to make the comparison + uuidFromHref, err := GetUuidFromHref(org.HREF, true) + if err != nil { + return "", err + } + if uuidFromHref == orgUuid { + return org.HREF, nil + } + } + return "", fmt.Errorf("couldn't find org with ID: %s", orgId) +} + +// Find a list of Virtual Centers matching the filter parameter. +// Filter constructing guide: https://pubs.vmware.com/vcloud-api-1-5/wwhelp/wwhimpl/js/html/wwhelp.htm#href=api_prog/GUID-CDF04296-5EB5-47E1-9BEC-228837C584CE.html +// Possible parameters are any attribute from QueryResultVirtualCenterRecordType struct +// E.g. filter could look like: name==vC1 +func QueryVirtualCenters(vcdClient *VCDClient, filter string) ([]*types.QueryResultVirtualCenterRecordType, error) { + results, err := vcdClient.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "virtualCenter", + "filter": filter, + }) + if err != nil { + return nil, err + } + + return results.Results.VirtualCenterRecord, nil +} + +// Find a Network port group by name +func QueryNetworkPortGroup(vcdCli *VCDClient, name string) ([]*types.PortGroupRecordType, error) { + return QueryPortGroups(vcdCli, fmt.Sprintf("name==%s;portgroupType==%s", url.QueryEscape(name), "NETWORK")) +} + +// Find a Distributed port group by name +func QueryDistributedPortGroup(vcdCli *VCDClient, name string) ([]*types.PortGroupRecordType, error) { + return QueryPortGroups(vcdCli, fmt.Sprintf("name==%s;portgroupType==%s", url.QueryEscape(name), "DV_PORTGROUP")) +} + +// Find a list of Port groups matching the filter parameter. 
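+//
+// An illustrative filter value (the port group name is an assumption) would be
+// "name==my-portgroup;portgroupType==DV_PORTGROUP", i.e. the same form that
+// QueryNetworkPortGroup and QueryDistributedPortGroup build above.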
+func QueryPortGroups(vcdCli *VCDClient, filter string) ([]*types.PortGroupRecordType, error) { + results, err := vcdCli.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "portgroup", + "filter": filter, + "filterEncoded": "true", + }) + if err != nil { + return nil, err + } + + return results.Results.PortGroupRecord, nil +} + +// GetExternalNetwork returns an ExternalNetwork reference if the network name matches an existing one. +// If no valid external network is found, it returns an empty ExternalNetwork reference and an error +// Deprecated: use vcdClient.GetExternalNetworkByName instead +func GetExternalNetwork(vcdClient *VCDClient, networkName string) (*ExternalNetwork, error) { + + if !vcdClient.Client.IsSysAdmin { + return &ExternalNetwork{}, fmt.Errorf("functionality requires System Administrator privileges") + } + + extNetworkHREF, err := getExternalNetworkHref(&vcdClient.Client) + if err != nil { + return &ExternalNetwork{}, err + } + + extNetworkRefs := &types.ExternalNetworkReferences{} + _, err = vcdClient.Client.ExecuteRequest(extNetworkHREF, http.MethodGet, + types.MimeNetworkConnectionSection, "error retrieving external networks: %s", nil, extNetworkRefs) + if err != nil { + return &ExternalNetwork{}, err + } + + externalNetwork := NewExternalNetwork(&vcdClient.Client) + + found := false + for _, netRef := range extNetworkRefs.ExternalNetworkReference { + if netRef.Name == networkName { + externalNetwork.ExternalNetwork.HREF = netRef.HREF + err = externalNetwork.Refresh() + found = true + if err != nil { + return &ExternalNetwork{}, err + } + } + } + + if found { + return externalNetwork, nil + } + return externalNetwork, fmt.Errorf("could not find external network named %s", networkName) + +} + +// GetExternalNetworks returns a list of available external networks +func (vcdClient *VCDClient) GetExternalNetworks() (*types.ExternalNetworkReferences, error) { + + if !vcdClient.Client.IsSysAdmin { + return nil, fmt.Errorf("functionality requires System Administrator privileges") + } + + extNetworkHREF, err := getExternalNetworkHref(&vcdClient.Client) + if err != nil { + return nil, err + } + + extNetworkRefs := &types.ExternalNetworkReferences{} + _, err = vcdClient.Client.ExecuteRequest(extNetworkHREF, http.MethodGet, + types.MimeNetworkConnectionSection, "error retrieving external networks: %s", nil, extNetworkRefs) + if err != nil { + return nil, err + } + + return extNetworkRefs, nil +} + +// GetExternalNetworkByName returns an ExternalNetwork reference if the network name matches an existing one. +// If no valid external network is found, it returns a nil ExternalNetwork reference and an error +func (vcdClient *VCDClient) GetExternalNetworkByName(networkName string) (*ExternalNetwork, error) { + + extNetworkRefs, err := vcdClient.GetExternalNetworks() + + if err != nil { + return nil, err + } + + externalNetwork := NewExternalNetwork(&vcdClient.Client) + + for _, netRef := range extNetworkRefs.ExternalNetworkReference { + if netRef.Name == networkName { + externalNetwork.ExternalNetwork.HREF = netRef.HREF + err = externalNetwork.Refresh() + if err != nil { + return nil, err + } + return externalNetwork, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetExternalNetworkById returns an ExternalNetwork reference if the network ID matches an existing one. 
+// If no valid external network is found, it returns a nil ExternalNetwork reference and an error +func (vcdClient *VCDClient) GetExternalNetworkById(id string) (*ExternalNetwork, error) { + + extNetworkRefs, err := vcdClient.GetExternalNetworks() + + if err != nil { + return nil, err + } + + externalNetwork := NewExternalNetwork(&vcdClient.Client) + + for _, netRef := range extNetworkRefs.ExternalNetworkReference { + // ExternalNetworkReference items don't have ID + // We compare using the UUID from HREF + if equalIds(id, "", netRef.HREF) { + externalNetwork.ExternalNetwork.HREF = netRef.HREF + err = externalNetwork.Refresh() + if err != nil { + return nil, err + } + return externalNetwork, nil + } + } + + return nil, ErrorEntityNotFound +} + +// GetExternalNetworkByNameOrId returns an ExternalNetwork reference if either the network name or ID matches an existing one. +// If no valid external network is found, it returns a nil ExternalNetwork reference and an error +func (vcdClient *VCDClient) GetExternalNetworkByNameOrId(identifier string) (*ExternalNetwork, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return vcdClient.GetExternalNetworkByName(name) } + getById := func(id string, refresh bool) (interface{}, error) { return vcdClient.GetExternalNetworkById(id) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, false) + if entity == nil { + return nil, err + } + return entity.(*ExternalNetwork), err +} + +// CreateExternalNetwork allows create external network and returns Task or error. +// types.ExternalNetwork struct is general and used for various types of networks. But for external network +// fence mode is always isolated, isInherited is false, parentNetwork is empty. +func CreateExternalNetwork(vcdClient *VCDClient, externalNetworkData *types.ExternalNetwork) (Task, error) { + + if !vcdClient.Client.IsSysAdmin { + return Task{}, fmt.Errorf("functionality requires System Administrator privileges") + } + + err := validateExternalNetwork(externalNetworkData) + if err != nil { + return Task{}, err + } + + // Type: VimObjectRefType + // Namespace: http://www.vmware.com/vcloud/extension/v1.5 + // https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/types/VimObjectRefsType.html + // Description: Represents the Managed Object Reference (MoRef) and the type of a vSphere object. + // Since: 0.9 + type vimObjectRefCreate struct { + VimServerRef *types.Reference `xml:"vmext:VimServerRef"` + MoRef string `xml:"vmext:MoRef"` + VimObjectType string `xml:"vmext:VimObjectType"` + } + + // Type: VimObjectRefsType + // Namespace: http://www.vmware.com/vcloud/extension/v1.5 + // https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/types/VimObjectRefsType.html + // Description: List of VimObjectRef elements. + // Since: 0.9 + type vimObjectRefsCreate struct { + VimObjectRef []*vimObjectRefCreate `xml:"vmext:VimObjectRef"` + } + + // Type: VMWExternalNetworkType + // Namespace: http://www.vmware.com/vcloud/extension/v1.5 + // https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/types/VMWExternalNetworkType.html + // Description: External network type. 
+ // Since: 1.0 + type externalNetworkCreate struct { + XMLName xml.Name `xml:"vmext:VMWExternalNetwork"` + XmlnsVmext string `xml:"xmlns:vmext,attr,omitempty"` + XmlnsVcloud string `xml:"xmlns:vcloud,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Link []*types.Link `xml:"Link,omitempty"` + Description string `xml:"vcloud:Description,omitempty"` + Tasks *types.TasksInProgress `xml:"Tasks,omitempty"` + Configuration *types.NetworkConfiguration `xml:"vcloud:Configuration,omitempty"` + VimPortGroupRef *vimObjectRefCreate `xml:"VimPortGroupRef,omitempty"` + VimPortGroupRefs *vimObjectRefsCreate `xml:"vmext:VimPortGroupRefs,omitempty"` + VCloudExtension *types.VCloudExtension `xml:"VCloudExtension,omitempty"` + } + + // Specific struct is used as two different name spaces needed for vCD API and return struct has diff name spaces + externalNetwork := &externalNetworkCreate{} + externalNetwork.HREF = externalNetworkData.HREF + externalNetwork.Description = externalNetworkData.Description + externalNetwork.Name = externalNetworkData.Name + externalNetwork.Type = externalNetworkData.Type + externalNetwork.ID = externalNetworkData.ID + externalNetwork.OperationKey = externalNetworkData.OperationKey + externalNetwork.Link = externalNetworkData.Link + externalNetwork.Configuration = externalNetworkData.Configuration + if externalNetwork.Configuration != nil { + externalNetwork.Configuration.Xmlns = types.XMLNamespaceVCloud + } + externalNetwork.VCloudExtension = externalNetworkData.VCloudExtension + externalNetwork.XmlnsVmext = types.XMLNamespaceExtension + externalNetwork.XmlnsVcloud = types.XMLNamespaceVCloud + externalNetwork.Type = types.MimeExternalNetwork + if externalNetworkData.VimPortGroupRefs != nil { + externalNetwork.VimPortGroupRefs = &vimObjectRefsCreate{} + for _, vimObjRef := range externalNetworkData.VimPortGroupRefs.VimObjectRef { + externalNetwork.VimPortGroupRefs.VimObjectRef = append(externalNetwork.VimPortGroupRefs.VimObjectRef, &vimObjectRefCreate{ + VimServerRef: vimObjRef.VimServerRef, + MoRef: vimObjRef.MoRef, + VimObjectType: vimObjRef.VimObjectType, + }) + } + } + if externalNetworkData.VimPortGroupRef != nil { + externalNetwork.VimPortGroupRef = &vimObjectRefCreate{ + VimServerRef: externalNetworkData.VimPortGroupRef.VimServerRef, + MoRef: externalNetworkData.VimPortGroupRef.MoRef, + VimObjectType: externalNetworkData.VimPortGroupRef.VimObjectType, + } + } + + externalNetHREF := vcdClient.Client.VCDHREF + externalNetHREF.Path += "/admin/extension/externalnets" + + if externalNetwork.Configuration == nil { + externalNetwork.Configuration = &types.NetworkConfiguration{} + } + externalNetwork.Configuration.FenceMode = "isolated" + + // Return the task + task, err := vcdClient.Client.ExecuteTaskRequest(externalNetHREF.String(), http.MethodPost, + types.MimeExternalNetwork, "error instantiating a new ExternalNetwork: %s", externalNetwork) + + if err != nil { + return Task{}, err + } + if task.Task == nil || task.Task.Tasks == nil || len(task.Task.Tasks.Task) == 0 { + return Task{}, fmt.Errorf("create external network task wasn't found") + } + // Real task in task array + task.Task = task.Task.Tasks.Task[0] + + return task, err +} + +func getExtension(client *Client) (*types.Extension, error) { + extensions := &types.Extension{} + + extensionHREF := client.VCDHREF + extensionHREF.Path += 
"/admin/extension/" + + _, err := client.ExecuteRequest(extensionHREF.String(), http.MethodGet, + "", "error retrieving extension: %s", nil, extensions) + + return extensions, err +} + +// GetStorageProfileByHref fetches storage profile using provided HREF. +// Deprecated: use client.GetStorageProfileByHref or vcdClient.GetStorageProfileByHref +func GetStorageProfileByHref(vcdClient *VCDClient, url string) (*types.VdcStorageProfile, error) { + return vcdClient.Client.GetStorageProfileByHref(url) +} + +// GetStorageProfileByHref fetches a storage profile using its HREF. +func (vcdClient *VCDClient) GetStorageProfileByHref(url string) (*types.VdcStorageProfile, error) { + return vcdClient.Client.GetStorageProfileByHref(url) +} + +// GetStorageProfileByHref fetches a storage profile using its HREF. +func (client *Client) GetStorageProfileByHref(url string) (*types.VdcStorageProfile, error) { + + vdcStorageProfile := &types.VdcStorageProfile{} + + _, err := client.ExecuteRequest(url, http.MethodGet, + "", "error retrieving storage profile: %s", nil, vdcStorageProfile) + if err != nil { + return nil, err + } + + return vdcStorageProfile, nil +} + +// QueryProviderVdcStorageProfileByName finds a provider VDC storage profile by name +// There are four cases: +// 1. [FOUND] The name matches and is unique among all the storage profiles +// 2. [FOUND] The name matches, it is not unique, and it is disambiguated by the provider VDC HREF +// 3. [NOT FOUND] The name matches, is not unique, but no Provider HREF was given: the search will fail +// 4. [NOT FOUND] The name does not match any of the storage profiles +func (vcdClient *VCDClient) QueryProviderVdcStorageProfileByName(name, providerVDCHref string) (*types.QueryResultProviderVdcStorageProfileRecordType, error) { + results, err := vcdClient.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "providerVdcStorageProfile", + "pageSize": "128", + }) + if err != nil { + return nil, err + } + + // Note: pageSize of 128 (the maximum page size allowed) should be enough to get all storage profiles. + // In case this is not true, we trap the error, so that we become aware that this assumption is incorrect. + // TODO: convert this query into a cumulativeQuery + if results.Results.Total > 128.0 { + return nil, fmt.Errorf("[QueryWithNotEncodedParams] FATAL - more than 128 storage profiles found. Refactory needed") + } + + var recs []*types.QueryResultProviderVdcStorageProfileRecordType + for _, rec := range results.Results.ProviderVdcStorageProfileRecord { + if rec.Name == name { + // Double match: both the name and the provider VDC match: we can return the result + if providerVDCHref != "" && providerVDCHref == rec.ProviderVdcHREF { + return rec, nil + } + // if there is a name match, but no provider VDC was given, we add to the result, and we will check later. + if providerVDCHref == "" { + recs = append(recs, rec) + } + } + } + + providerVDCMessage := "" + if providerVDCHref != "" { + providerVDCMessage = fmt.Sprintf("in provider VDC '%s'", providerVDCHref) + } + if len(recs) == 0 { + return nil, fmt.Errorf("no records found for storage profile '%s' %s", name, providerVDCMessage) + } + if len(recs) > 1 { + return nil, fmt.Errorf("more than 1 record found for storage profile '%s'. Add Provider VDC HREF in the search to disambiguate", name) + } + return recs[0], nil +} + +// QueryProviderVdcStorageProfileByName finds a provider VDC storage profile by name +// Deprecated: wrong implementation. 
Use VCDClient.QueryProviderVdcStorageProfileByName +func QueryProviderVdcStorageProfileByName(vcdCli *VCDClient, name string) ([]*types.QueryResultProviderVdcStorageProfileRecordType, error) { + results, err := vcdCli.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "providerVdcStorageProfile", + "filter": fmt.Sprintf("name==%s", url.QueryEscape(name)), + "filterEncoded": "true", + }) + if err != nil { + return nil, err + } + + return results.Results.ProviderVdcStorageProfileRecord, nil +} + +// QueryNetworkPoolByName finds a network pool by name +func QueryNetworkPoolByName(vcdCli *VCDClient, name string) ([]*types.QueryResultNetworkPoolRecordType, error) { + results, err := vcdCli.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "networkPool", + "filter": fmt.Sprintf("name==%s", url.QueryEscape(name)), + "filterEncoded": "true", + }) + if err != nil { + return nil, err + } + + return results.Results.NetworkPoolRecord, nil +} + +// QueryProviderVdcByName finds a provider VDC by name +func QueryProviderVdcByName(vcdCli *VCDClient, name string) ([]*types.QueryResultVMWProviderVdcRecordType, error) { + results, err := vcdCli.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "providerVdc", + "filter": fmt.Sprintf("name==%s", url.QueryEscape(name)), + "filterEncoded": "true", + }) + if err != nil { + return nil, err + } + + return results.Results.VMWProviderVdcRecord, nil +} + +// QueryProviderVdcs gets the list of available provider VDCs +func (vcdClient *VCDClient) QueryProviderVdcs() ([]*types.QueryResultVMWProviderVdcRecordType, error) { + results, err := vcdClient.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "providerVdc", + }) + if err != nil { + return nil, err + } + + return results.Results.VMWProviderVdcRecord, nil +} + +// QueryNetworkPools gets the list of network pools +func (vcdClient *VCDClient) QueryNetworkPools() ([]*types.QueryResultNetworkPoolRecordType, error) { + results, err := vcdClient.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "networkPool", + }) + if err != nil { + return nil, err + } + + return results.Results.NetworkPoolRecord, nil +} + +// QueryProviderVdcStorageProfiles gets the list of provider VDC storage profiles from ALL provider VDCs +// Deprecated: use either client.QueryProviderVdcStorageProfiles or client.QueryAllProviderVdcStorageProfiles +func (vcdClient *VCDClient) QueryProviderVdcStorageProfiles() ([]*types.QueryResultProviderVdcStorageProfileRecordType, error) { + return vcdClient.Client.QueryAllProviderVdcStorageProfiles() +} + +// QueryAllProviderVdcStorageProfiles gets the list of provider VDC storage profiles from ALL provider VDCs +func (client *Client) QueryAllProviderVdcStorageProfiles() ([]*types.QueryResultProviderVdcStorageProfileRecordType, error) { + results, err := client.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "providerVdcStorageProfile", + }) + if err != nil { + return nil, err + } + + return results.Results.ProviderVdcStorageProfileRecord, nil +} + +// QueryProviderVdcStorageProfiles gets the list of provider VDC storage profiles for a given Provider VDC +func (client *Client) QueryProviderVdcStorageProfiles(providerVdcHref string) ([]*types.QueryResultProviderVdcStorageProfileRecordType, error) { + results, err := client.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "providerVdcStorageProfile", + "filter": fmt.Sprintf("providerVdc==%s", providerVdcHref), + }) + if err != nil { + return nil, err + } + + return 
results.Results.ProviderVdcStorageProfileRecord, nil +} + +// QueryCompatibleStorageProfiles retrieves all storage profiles belonging to the same provider VDC to which +// the Org VDC belongs +func (adminVdc *AdminVdc) QueryCompatibleStorageProfiles() ([]*types.QueryResultProviderVdcStorageProfileRecordType, error) { + return adminVdc.client.QueryProviderVdcStorageProfiles(adminVdc.AdminVdc.ProviderVdcReference.HREF) +} + +// GetNetworkPoolByHREF functions fetches an network pool using VDC client and network pool href +func GetNetworkPoolByHREF(client *VCDClient, href string) (*types.VMWNetworkPool, error) { + util.Logger.Printf("[TRACE] Get network pool by HREF: %s\n", href) + + networkPool := &types.VMWNetworkPool{} + + _, err := client.Client.ExecuteRequest(href, http.MethodGet, + "", "error fetching network pool: %s", nil, networkPool) + + // Return the disk + return networkPool, err + +} + +// QueryOrgVdcNetworkByName finds a org VDC network by name which has edge gateway as reference +func QueryOrgVdcNetworkByName(vcdCli *VCDClient, name string) ([]*types.QueryResultOrgVdcNetworkRecordType, error) { + results, err := vcdCli.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "orgVdcNetwork", + "filter": fmt.Sprintf("name==%s", url.QueryEscape(name)), + "filterEncoded": "true", + }) + if err != nil { + return nil, err + } + + return results.Results.OrgVdcNetworkRecord, nil +} + +// QueryAllVdcs returns all Org VDCs in a VCD instance +// +// This function requires "System" user or returns an error +func (client *Client) QueryAllVdcs() ([]*types.QueryResultOrgVdcRecordType, error) { + if !client.IsSysAdmin { + return nil, errors.New("this function only works with 'System' user") + } + return queryOrgVdcList(client, nil) +} + +// QueryNsxtManagerByName searches for NSX-T managers available in VCD +func (vcdClient *VCDClient) QueryNsxtManagerByName(name string) ([]*types.QueryResultNsxtManagerRecordType, error) { + results, err := vcdClient.QueryWithNotEncodedParams(nil, map[string]string{ + "type": "nsxTManager", + "filter": fmt.Sprintf("name==%s", url.QueryEscape(name)), + "filterEncoded": "true", + }) + if err != nil { + return nil, err + } + + return results.Results.NsxtManagerRecord, nil +} + +// GetOrgByName finds an Organization by name +// On success, returns a pointer to the Org structure and a nil error +// On failure, returns a nil pointer and an error +func (vcdClient *VCDClient) GetOrgByName(orgName string) (*Org, error) { + orgUrl, err := getOrgHREF(vcdClient, orgName) + if err != nil { + // Since this operation is a lookup from a list, we return the standard ErrorEntityNotFound + return nil, ErrorEntityNotFound + } + org := NewOrg(&vcdClient.Client) + + _, err = vcdClient.Client.ExecuteRequest(orgUrl, http.MethodGet, + "", "error retrieving org: %s", nil, org.Org) + if err != nil { + return nil, err + } + org.TenantContext = &TenantContext{ + OrgId: extractUuid(org.Org.ID), + OrgName: org.Org.Name, + } + return org, nil +} + +// GetOrgById finds an Organization by ID +// On success, returns a pointer to the Org structure and a nil error +// On failure, returns a nil pointer and an error +func (vcdClient *VCDClient) GetOrgById(orgId string) (*Org, error) { + orgUrl, err := getOrgHREFById(vcdClient, orgId) + if err != nil { + // Since this operation is a lookup from a list, we return the standard ErrorEntityNotFound + return nil, ErrorEntityNotFound + } + org := NewOrg(&vcdClient.Client) + + _, err = vcdClient.Client.ExecuteRequest(orgUrl, http.MethodGet, + "", "error 
retrieving org list: %s", nil, org.Org) + if err != nil { + return nil, err + } + org.TenantContext = &TenantContext{ + OrgId: extractUuid(org.Org.ID), + OrgName: org.Org.Name, + } + return org, nil +} + +// GetOrgByNameOrId finds an Organization by name or ID +// On success, returns a pointer to the Org structure and a nil error +// On failure, returns a nil pointer and an error +func (vcdClient *VCDClient) GetOrgByNameOrId(identifier string) (*Org, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return vcdClient.GetOrgByName(name) } + getById := func(id string, refresh bool) (interface{}, error) { return vcdClient.GetOrgById(id) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, false) + if entity == nil { + return nil, err + } + return entity.(*Org), err +} + +// GetAdminOrgByName finds an Admin Organization by name +// On success, returns a pointer to the Admin Org structure and a nil error +// On failure, returns a nil pointer and an error +func (vcdClient *VCDClient) GetAdminOrgByName(orgName string) (*AdminOrg, error) { + orgUrl, err := getOrgHREF(vcdClient, orgName) + if err != nil { + return nil, ErrorEntityNotFound + } + orgHREF := vcdClient.Client.VCDHREF + orgHREF.Path += "/admin/org/" + strings.Split(orgUrl, "/api/org/")[1] + + adminOrg := NewAdminOrg(&vcdClient.Client) + + _, err = vcdClient.Client.ExecuteRequest(orgHREF.String(), http.MethodGet, + "", "error retrieving org: %s", nil, adminOrg.AdminOrg) + if err != nil { + return nil, err + } + adminOrg.TenantContext = &TenantContext{ + OrgId: extractUuid(adminOrg.AdminOrg.ID), + OrgName: adminOrg.AdminOrg.Name, + } + + return adminOrg, nil +} + +// GetAdminOrgById finds an Admin Organization by ID +// On success, returns a pointer to the Admin Org structure and a nil error +// On failure, returns a nil pointer and an error +func (vcdClient *VCDClient) GetAdminOrgById(orgId string) (*AdminOrg, error) { + orgUrl, err := getOrgHREFById(vcdClient, orgId) + if err != nil { + return nil, ErrorEntityNotFound + } + orgHREF := vcdClient.Client.VCDHREF + orgHREF.Path += "/admin/org/" + strings.Split(orgUrl, "/api/org/")[1] + + adminOrg := NewAdminOrg(&vcdClient.Client) + + _, err = vcdClient.Client.ExecuteRequest(orgHREF.String(), http.MethodGet, + "", "error retrieving org: %s", nil, adminOrg.AdminOrg) + if err != nil { + return nil, err + } + adminOrg.TenantContext = &TenantContext{ + OrgId: extractUuid(adminOrg.AdminOrg.ID), + OrgName: adminOrg.AdminOrg.Name, + } + return adminOrg, nil +} + +// GetAdminOrgByNameOrId finds an Admin Organization by name or ID +// On success, returns a pointer to the Admin Org structure and a nil error +// On failure, returns a nil pointer and an error +func (vcdClient *VCDClient) GetAdminOrgByNameOrId(identifier string) (*AdminOrg, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return vcdClient.GetAdminOrgByName(name) } + getById := func(id string, refresh bool) (interface{}, error) { return vcdClient.GetAdminOrgById(id) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, false) + if entity == nil { + return nil, err + } + return entity.(*AdminOrg), err +} + +// Returns the UUID part of an HREF +// Similar to getBareEntityUuid, but tailored to HREF +func GetUuidFromHref(href string, idAtEnd bool) (string, error) { + util.Logger.Printf("[TRACE] GetUuidFromHref got href: %s with idAtEnd: %t", href, idAtEnd) + // Regular expression to match an ID: + // 1 string starting by 'https://' and ending with a 
'/', + // followed by + // 1 group of 8 hexadecimal digits + // 3 groups of 4 hexadecimal digits + // 1 group of 12 hexadecimal digits + + searchExpression := `^https://.+/([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})` + if idAtEnd { + searchExpression += `$` + } else { + searchExpression += `.*$` + } + reGetID := regexp.MustCompile(searchExpression) + matchList := reGetID.FindAllStringSubmatch(href, -1) + + if len(matchList) == 0 || len(matchList[0]) < 2 { + return "", fmt.Errorf("error extracting UUID from '%s'", href) + } + util.Logger.Printf("[TRACE] GetUuidFromHref returns UUID : %s", matchList[0][1]) + return matchList[0][1], nil +} + +// GetOrgList returns the list ov available orgs +func (vcdClient *VCDClient) GetOrgList() (*types.OrgList, error) { + orgListHREF := vcdClient.Client.VCDHREF + orgListHREF.Path += "/org" + + orgList := new(types.OrgList) + + _, err := vcdClient.Client.ExecuteRequest(orgListHREF.String(), http.MethodGet, + "", "error getting list of organizations: %s", nil, orgList) + if err != nil { + return nil, err + } + return orgList, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/task.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/task.go new file mode 100644 index 000000000..24c5f588e --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/task.go @@ -0,0 +1,198 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type Task struct { + Task *types.Task + client *Client +} + +func NewTask(cli *Client) *Task { + return &Task{ + Task: new(types.Task), + client: cli, + } +} + +// If the error is not nil, composes an error message +// made of the error itself + the information from the task's Error component. +// See: +// https://code.vmware.com/apis/220/vcloud#/doc/doc/types/TaskType.html +// https://code.vmware.com/apis/220/vcloud#/doc/doc/types/ErrorType.html +func (task *Task) getErrorMessage(err error) string { + errorMessage := "" + if err != nil { + errorMessage = err.Error() + } + if task.Task.Error != nil { + errorMessage += " [" + + fmt.Sprintf("%d:%s", + task.Task.Error.MajorErrorCode, // The MajorError is a numeric code + task.Task.Error.MinorErrorCode) + // The MinorError is a string with a generic definition of the error + "] - " + task.Task.Error.Message + } + return errorMessage +} + +func (task *Task) Refresh() error { + + if task.Task == nil { + return fmt.Errorf("cannot refresh, Object is empty") + } + + refreshUrl := urlParseRequestURI(task.Task.HREF) + + req := task.client.NewRequest(map[string]string{}, http.MethodGet, *refreshUrl, nil) + + resp, err := checkResp(task.client.Http.Do(req)) + if err != nil { + return fmt.Errorf("error retrieving task: %s", err) + } + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. 
+ task.Task = &types.Task{} + + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return fmt.Errorf("error decoding task response: %s", task.getErrorMessage(err)) + } + + // The request was successful + return nil +} + +// This callback function can be passed to task.WaitInspectTaskCompletion +// to perform user defined operations +// * task is the task object being processed +// * howManyTimes is the number of times the task has been refreshed +// * elapsed is how much time since the task was initially processed +// * first is true if this is the first refresh of the task +// * last is true if the function is being called for the last time. +type InspectionFunc func(task *types.Task, howManyTimes int, elapsed time.Duration, first, last bool) + +// Customizable version of WaitTaskCompletion. +// Users can define the sleeping duration and an optional callback function for +// extra monitoring. +func (task *Task) WaitInspectTaskCompletion(inspectionFunc InspectionFunc, delay time.Duration) error { + + if task.Task == nil { + return fmt.Errorf("cannot refresh, Object is empty") + } + + taskMonitor := os.Getenv("GOVCD_TASK_MONITOR") + howManyTimesRefreshed := 0 + startTime := time.Now() + for { + howManyTimesRefreshed++ + elapsed := time.Since(startTime) + err := task.Refresh() + if err != nil { + return fmt.Errorf("error retrieving task: %s", err) + } + + // If an inspection function is provided, we pass information about the task processing: + // * the task itself + // * the number of iterations + // * how much time we have spent querying the task so far + // * whether this is the first iteration + // * whether this is the last iteration + // It's up to the inspection function to render this information fittingly. + + // If task is not in a waiting status we're done, check if there's an error and return it. + if task.Task.Status != "queued" && task.Task.Status != "preRunning" && task.Task.Status != "running" { + if inspectionFunc != nil { + inspectionFunc(task.Task, + howManyTimesRefreshed, + elapsed, + howManyTimesRefreshed == 1, // first + task.Task.Status == "error" || task.Task.Status == "success", // last + ) + } + if task.Task.Status == "error" { + return fmt.Errorf("task did not complete successfully: %s", task.getErrorMessage(err)) + } + return nil + } + + // If the environment variable "GOVCD_TASK_MONITOR" is set, its value + // will be used to choose among pre-defined InspectionFunc + if inspectionFunc == nil { + if taskMonitor != "" { + switch taskMonitor { + case "log": + inspectionFunc = LogTask // writes full task details to the log + case "show": + inspectionFunc = ShowTask // writes full task details to the screen + case "simple_log": + inspectionFunc = SimpleLogTask // writes a summary line for the task to the log + case "simple_show": + inspectionFunc = SimpleShowTask // writes a summary line for the task to the screen + } + } + } + if inspectionFunc != nil { + inspectionFunc(task.Task, + howManyTimesRefreshed, + elapsed, + howManyTimesRefreshed == 1, // first + false, // last + ) + } + + // Sleep for a given period and try again. 
+ time.Sleep(delay) + } +} + +// Checks the status of the task every 3 seconds and returns when the +// task is either completed or failed +func (task *Task) WaitTaskCompletion() error { + return task.WaitInspectTaskCompletion(nil, 3*time.Second) +} + +func (task *Task) GetTaskProgress() (string, error) { + if task.Task == nil { + return "", fmt.Errorf("cannot refresh, Object is empty") + } + + err := task.Refresh() + if err != nil { + return "", fmt.Errorf("error retreiving task: %s", err) + } + + if task.Task.Status == "error" { + return "", fmt.Errorf("task did not complete successfully: %s", task.getErrorMessage(err)) + } + + return strconv.Itoa(task.Task.Progress), nil +} + +func (task *Task) CancelTask() error { + cancelTaskURL, err := url.ParseRequestURI(task.Task.HREF + "/action/cancel") + if err != nil { + util.Logger.Printf("[Error] Error cancelling task %v: %s", cancelTaskURL.String(), err) + return err + } + + request := task.client.NewRequest(map[string]string{}, http.MethodPost, *cancelTaskURL, nil) + _, err = checkResp(task.client.Http.Do(request)) + if err != nil { + util.Logger.Printf("[Error] Error cancelling task %v: %s", cancelTaskURL.String(), err) + return err + } + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/tenant_context.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/tenant_context.go new file mode 100644 index 000000000..6f540cd1d --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/tenant_context.go @@ -0,0 +1,185 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ +package govcd + +import ( + "fmt" + "strings" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +// TenantContext stores the information needed for an object to be used in the context of a given organization +type TenantContext struct { + OrgId string // The bare ID (without prefix) of an organization + OrgName string // The organization name +} + +// organization is an abstraction of types Org and AdminOrg +type organization interface { + orgId() string + orgName() string + tenantContext() (*TenantContext, error) + fullObject() interface{} +} + +//lint:ignore U1000 for future usage +type genericVdc interface { + vdcId() string + vdcName() string + vdcParent() interface{} +} + +//lint:ignore U1000 for future usage +type genericCatalog interface { + catalogId() string + catalogName() string + catalogParent() interface{} +} + +// Implementation of organization interface for Org +func (org *Org) orgId() string { return org.Org.ID } +func (org *Org) orgName() string { return org.Org.Name } +func (org *Org) tenantContext() (*TenantContext, error) { return org.getTenantContext() } +func (org *Org) fullObject() interface{} { return org } + +// Implementation of organization interface for AdminOrg +func (adminOrg *AdminOrg) orgId() string { return adminOrg.AdminOrg.ID } +func (adminOrg *AdminOrg) orgName() string { return adminOrg.AdminOrg.Name } +func (adminOrg *AdminOrg) tenantContext() (*TenantContext, error) { return adminOrg.getTenantContext() } +func (adminOrg *AdminOrg) fullObject() interface{} { return adminOrg } + +// Implementation of genericVdc interface for Vdc +func (vdc *Vdc) vdcId() string { return vdc.Vdc.ID } +func (vdc *Vdc) vdcName() string { return vdc.Vdc.Name } +func (vdc *Vdc) vdcParent() interface{} { return vdc.parent } + +// Implementation of genericVdc interface for 
AdminVdc +func (adminVdc *AdminVdc) vdcId() string { return adminVdc.AdminVdc.ID } +func (adminVdc *AdminVdc) vdcName() string { return adminVdc.AdminVdc.Name } +func (adminVdc *AdminVdc) vdcParent() interface{} { return adminVdc.parent } + +// Implementation of genericCatalog interface for AdminCatalog +func (adminCatalog *AdminCatalog) catalogId() string { return adminCatalog.AdminCatalog.ID } +func (adminCatalog *AdminCatalog) catalogName() string { return adminCatalog.AdminCatalog.Name } +func (adminCatalog *AdminCatalog) catalogParent() interface{} { return adminCatalog.parent } + +// Implementation of genericCatalog interface for AdminCatalog +func (catalog *Catalog) catalogId() string { return catalog.Catalog.ID } +func (catalog *Catalog) catalogName() string { return catalog.Catalog.Name } +func (catalog *Catalog) catalogParent() interface{} { return catalog.parent } + +// getTenantContext returns the tenant context information for an Org +// If the information was not stored, it gets created and stored for future use +func (org *Org) getTenantContext() (*TenantContext, error) { + if org.TenantContext == nil { + id, err := getBareEntityUuid(org.Org.ID) + if err != nil { + return nil, err + } + org.TenantContext = &TenantContext{ + OrgId: id, + OrgName: org.Org.Name, + } + } + return org.TenantContext, nil +} + +// getTenantContext returns the tenant context information for an AdminOrg +// If the information was not stored, it gets created and stored for future use +func (org *AdminOrg) getTenantContext() (*TenantContext, error) { + if org.TenantContext == nil { + id, err := getBareEntityUuid(org.AdminOrg.ID) + if err != nil { + return nil, err + } + org.TenantContext = &TenantContext{ + OrgId: id, + OrgName: org.AdminOrg.Name, + } + } + return org.TenantContext, nil +} + +// getTenantContext retrieves the tenant context for an AdminVdc +func (vdc *AdminVdc) getTenantContext() (*TenantContext, error) { + org := vdc.parent + + if org == nil { + return nil, fmt.Errorf("VDC %s has no parent", vdc.AdminVdc.Name) + } + return org.tenantContext() +} + +// getTenantContext retrieves the tenant context for a VDC +func (vdc *Vdc) getTenantContext() (*TenantContext, error) { + org := vdc.parent + + if org == nil { + return nil, fmt.Errorf("VDC %s has no parent", vdc.Vdc.Name) + } + return org.tenantContext() +} + +// getTenantContext retrieves the tenant context for an AdminCatalog +func (catalog *AdminCatalog) getTenantContext() (*TenantContext, error) { + org := catalog.parent + + if org == nil { + return nil, fmt.Errorf("catalog %s has no parent", catalog.AdminCatalog.Name) + } + return org.tenantContext() +} + +// getTenantContext retrieves the tenant context for a Catalog +func (catalog *Catalog) getTenantContext() (*TenantContext, error) { + org := catalog.parent + + if org == nil { + return nil, fmt.Errorf("catalog %s has no parent", catalog.Catalog.Name) + } + return org.tenantContext() +} + +// getTenantContextHeader returns a map of strings containing the tenant context items +// needed to be used in http.Request.Header +func getTenantContextHeader(tenantContext *TenantContext) map[string]string { + if tenantContext == nil { + return nil + } + if tenantContext.OrgName == "" || strings.EqualFold(tenantContext.OrgName, "system") { + return nil + } + return map[string]string{ + types.HeaderTenantContext: tenantContext.OrgId, + types.HeaderAuthContext: tenantContext.OrgName, + } +} + +// getTenantContextFromHeader does the opposite of getTenantContextHeader: +// given a header, returns 
a TenantContext +func getTenantContextFromHeader(header map[string]string) *TenantContext { + if len(header) == 0 { + return nil + } + tenantContext, okTenant := header[types.HeaderTenantContext] + AuthContext, okAuth := header[types.HeaderAuthContext] + if okTenant && okAuth { + return &TenantContext{ + OrgId: tenantContext, + OrgName: AuthContext, + } + } + return nil +} + +// getTenantContext retrieves the tenant context for a VdcGroup +func (vdcGroup *VdcGroup) getTenantContext() (*TenantContext, error) { + org := vdcGroup.parent + + if org == nil { + return nil, fmt.Errorf("VDC group %s has no parent", vdcGroup.VdcGroup.Name) + } + return org.tenantContext() +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/test-resources/cert.pem b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/test-resources/cert.pem new file mode 100644 index 000000000..81728c556 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/test-resources/cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFxzCCA6+gAwIBAgIUVbryzlRw0ahAY7e9sCTJQN5Q5VowDQYJKoZIhvcNAQEL +BQAwcjELMAkGA1UEBhMCVVMxDzANBgNVBAgMBk9yZWdvbjERMA8GA1UEBwwIUG9y +dGxhbmQxFTATBgNVBAoMDENvbXBhbnkgTmFtZTEMMAoGA1UECwwDT3JnMRowGAYD +VQQDDBFvdGhlci5leGFtcGxlLmNvbTAgFw0yMTEwMTIxMTExMjdaGA80NzU5MDkw +ODExMTEyN1owcjELMAkGA1UEBhMCVVMxDzANBgNVBAgMBk9yZWdvbjERMA8GA1UE +BwwIUG9ydGxhbmQxFTATBgNVBAoMDENvbXBhbnkgTmFtZTEMMAoGA1UECwwDT3Jn +MRowGAYDVQQDDBFvdGhlci5leGFtcGxlLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAPFoIdcexAQJ86OgmU7pS8Wli887AEBUfjIm57vLa7aESwr1 +iI9nABH1Nfgxewj3wp/NtGBpv1TpmlK2L76Wu5veVQ+HnhVZvm+Ya0mIRtbwUyyQ +WN+ECaJ+E6IGFJqGJjrb5ERu6UOK1CzD5gpaKzHfA0oLWyUzmS6js3Cv8Ln4WiYH +qK7V1ktFU7pABZk3n58oBYZ+KPzThzuUJqrv0PnYpl/Q5WvpWlEpt1P/IsRLKOop +q1nMWBB3QKhGAMdaxZELUbw19+9+cEiQZUruOVYBnzKZQMItmIkr+aWRk/XmHn92 +4f13RtPLM4uSWGmr2uG5IBwquxfeJsxSPn9nocs8uTJ9JRodTpyLGbqFdw4Vw10h +X6LRMvyEuuNvUpKMTF8lGL3v+hIXfx222aB7pH+hnRYHKNb+m0j+J2MQ9O/MNrHz +LRt/90t8YqHmJBOK9iDGKTjgmuZlshyfgvy89nzlvbKc90df3VI6To/TIKt5tBdC +jXLxQ+TL6DGL25uPpa7ZHyuKAywHhKBZV6R4jY4wuRuH38LX0fkMdOToYwKZA38M +5QzTCs9SXtoark3DtKwqaMHWdJk9BviatVaNmLLLerkYDMY/rjR3pGcaa8wCCPNY +HbzNbC6rD8eyaCluUFVoXLQyJcacA7wzhSR/jeC9G70onPlx0SWl+zzwM5udAgMB +AAGjUzBRMB0GA1UdDgQWBBRw8FkiYFaoUxPOLBkt43TQ1nK36DAfBgNVHSMEGDAW +gBRw8FkiYFaoUxPOLBkt43TQ1nK36DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4ICAQDSAqiJKyO2WXgsL0sl6iryjEijQ/S+dnrk0ICkCiy3bZYb1b3x +hHtBYN3aV93GTUCX+qypT8KXJzFSuZKKq5Hh00Hk6YhjO0hzUYvsbkfqluuj4ds7 +W1y+s7lUt9AM6XEfs84D1HZz/ez3vRYZ6pvS0hbO2JhEFq/4gPc0GR75K3elBiwd +WYKr3Aup9A8gBED0xzSnp5fb4si873DaN68xw5e/KPvYvZFTIZxc4XWwkP4alMcw +aIlajFR+szGJo4NibfiwBWRtq1yvi9wg/roRiI24kAqEh08pTxiFyq7209DjbW6a +iWH/qtbzmiBxcdqshFPBjry9oxkujWnjfZ7diwMvjUz5OnOVYJHRL5LFd5bwGbpq +oZZ9mv2z1srveRN7Nne8NKM94aZnPj/xBYbIvJencF5Yxh3rM6Y/nDP+5mtf/+Ks +qToqFc3nvIiq7AQicR7kdjPAHjoyCMKBSYO+oNKjhtj5+QHXwToumcriESJAEFg9 +JmftGH5Defg90di/AUmJD60nQ1rgclt1huxpZRurSeawtemEvEorB22bPpvzEgX7 +xb7OSp96aoww4GQ4H7Va3uaxiNnuRPdYIis3Alf7bPw0t1A9I1XKR6cA2vSttmum +1LxNJS0LKnEhMm3fy8g+TiubQYSOyT6qkUiu+J+rSwrDo1QzmvouQD+Jgg== +-----END CERTIFICATE----- diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/test-resources/key.pem b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/test-resources/key.pem new file mode 100644 index 000000000..d63b4c432 --- /dev/null +++ 
b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/test-resources/key.pem @@ -0,0 +1,54 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIJnDBOBgkqhkiG9w0BBQ0wQTApBgkqhkiG9w0BBQwwHAQIyZJXaPnPUVgCAggA +MAwGCCqGSIb3DQIJBQAwFAYIKoZIhvcNAwcECMwPqNNhiaSbBIIJSHGxsPf33TZn +xUbOZJcoRk7DNxnUMwNgbUnK+WL213AU0IAaJ5qMsNO8dwk8oOSkNs2K66h9ZGUI +8GXP/76ndkZEbNy6Xk2Uu7PI+mGSItAQBLdG8izPgYALlEVscqRbDb4NP2v9H7Eu +oJFvLuxL2xxjC8QL1R/MKqo4ZllKoCxzwdE949UPViPbpm49wMgUOEeS8Kwv0N/1 +HI8o7+KI5enPaIYUjQF6HdkZtcJ/zcMbNzdMNfPfQljBHl1KmGQfVV92Vaoa+E9a +bj4vf1PUd2Y4QCLhT8yxEYHSm4x7szky3ecsA2lLZPluuEKQOmDdcC596wCQ4Mmv +/p32xpzuT4eoDBeDHPCM2AdJl/lXFEgdvZQrNUsYGHHhf79aEsZbbREHVxMkcSPz +3K0IoWAPf3c3zTtG9MJEHLZavwBuGOu5xHIXnL6VIOWHvok/lhNPGbE6azXVTXjD +9tXV/478GDa5XnGxOzBrsIIWGqf0OMbySG2YIIr7g7BhsQpVVOgCQE3UsVMtWrud +UUXC68kdreT37V1zDkqpHyUHydvx1eSDAdJHmhnEYW+Dolk80IhfZcYUKEDQEO0z +nIdfcNjYgKljyvqHRADoK+eNV6p75KMDY5f/E4bGxpirrrijj6duFm+dtDTWNtIm +RgOj/eLrshbgxRr0GBZSupu0tN86+/TuL/OK9L3yWbJJ1vv+vwGisKysiy2m0D7q +KnTDK9hpSFOJg2MY7DWgaKl/qAXHgjG3YPfP0T69FYX/mf1tfjwwd3f4GfBdUPus +7RQJ6nZlk23ajrbvQBYF9KRCelQhVd81h3puSWc2Ip9IVvxb8eZ9s0gGpCBTGj9o +SFHPStYA8U0h5JEJOkHniy3apytjVmYa+CWBglLF6R+EEDSwZKDsZhWBozxPpUuC +9p7Lyqj3syJUdfnoCj8jcqWq3dIbWrjsWaKQLVJJW2uNrdNQS7hJpgSaug3qBkFG +I4hdK+gszYH9qTRrfAXcgqo4wGEELc0eCXhrHETrD98GGT7UpYzONxQ1wYOJ5u+f +OpTpb2pap08KihLBq4cdwi+S+6alWjEqDPxaZ/4cS2FZCPX/JcghOydEWeH8IarO +V/iT7wXF4b5yw/SfLq0bsHS8hiGuq9HE3QQrj+1b1Q0I2pxec3Kjvrqd3GUJjk5p ++MysONs2LGQu8+j8EHodQZjfSpjNmGhqXA7ligmNvfHayT4925xrZZfajUsl8dOO +a6yNQ6uXUd65JpQz8JOSPqgfhH7IPTSyRgQA9zltkt69w79E370n1cCz15/ujsm6 +JRJsD1519NZhXDOanx5BBQdlMqp+1CsAZNbKMKv4H9hwVJnN+sudOPIXB7cGeeVs ++xOZsLlw9MgJNOvjbVsFdhduQINcWiKtLkOWST0cZD5uFtvRPpZGvYCQvkbIN8BT +Qs7J21MFtpmodMLK1AIu3jQJrUScpVGgvsleP/esmibnhi5wO571DT1fDqLXm4aV +DodEAejG6UgMF2oxK3x5wuVxI6NZYZYjS+PB7HhaFKnRBf5IuFmk+MYJWab9Md+r +C99Ra/l8SeGmrSw5q1wsUUw6rFOi4hn2jm7u8/oqc2h1z1chmAOQyqaM3Fp0PZRp +ZO6rMRjSWDRBNrpZU1dEAFxbQ8vsWtkvX07Ov3vSpXUHKg229BFAKAsOnUAhu8MI +8dT+k5AlTjmRq5NoeLhqkcN+SWYaIe6A46pCY8sDJPs1XUR/dbQVZLcaQCSkcVZQ +VhAy3t1f2GY8vfCZrW6z4C5v0Zj/Vt/JtkClr8EEkGKuwuPHgca1a7wXXmVA/P4G +meigHEE6WHIkbjZu6uq2pn4KxhZLhUWg3EXewOQWxNpWzPac8lE5W2/w/tsAb6+o +xtixRCjAOxin8hJP72aL/kmbBKfrH/8Wkh7uusWSLvJ+iSYOBYRUZ7TKW7nuasXx +hk3aw7SCEHBkjSU+hmUjaren08R6Cja2usgmIXZPwnO1gTsn8f39CnIlJQ6XbxN+ +IuzHWnY45ihadez6JHfxTbT09hlaO04ojlBLhEyNHJF0r2+LGOcJqGz6SZEHoc4n +21EGZ4uN6wVZep8Y+telu3h5rBEKc+gV4S8qOCHPkdb9aavCrD+OowVQh4QIHGEr +m17oGAyBg184DLQvDc4680wGR2R9av3aDs7CK396pVmBx/OBwTODCejJ3t7NT3DS ++X5P35zNNM6WMZknWRJk9i3kT4NL7kBx0j30o7J4pKnhtW2VLDekaDPp25eqkRLw +c3re0lcpJF88OqUrH46o7t6XiuXWkFmoJYKqUEqemWiXX94NHpnzROpIk03Td9Mj +9dQI+KdqjWSazhYQSulbOAU9k4N5IiZgB4tsPY2vZs9iowIrw/BIZrmChx3d63Od +8uAHN3hldPN2Myqe0p7DA7V0p9Nr4qqM0JbeG7EEDf4XjgeytLYWRm5TJmNBiqA1 +40IWIqHcjQpqf6eD0RzxO0mcy4ZoeIU7dXMzdNsqg66yxRGynHmkKinrwoqsADV9 +gEWVdPqsSgNLycpcuGcOsdH1lTMDovrHvLzOnk8nT/F1LgH2czzRxHcrx2/2Zm60 +nkZ/sU5FKVAE7imuUbYyKRm6tXxLx7a/NNtac51GgcNXC808ycO2KdNJpqsgO6Ub +1apZe3+WxyiWSRMaQ+Dw7GlR4ZXgdtQTAniDe8I3Be8ZI1uHQtth+IN9mA9CLIb6 +9qQy3eOo5Ip4RIVTdOX3rVGOzOPD57T2K4Cg2XDgWC8g2o+X6Sgv0orwwoqR1QWa +HN6VOc82KonKAdo1ctnrxWq7JodhQOqZvFRBxvRV9Rdww/GKKE8q0PHYBGNfGAXY +MC9vflAFIRTaiNTGkmpUwWg0iTaXGu2dVCO/4yTUBVcfubzPacXFocxU0zcEk/ll +M2yoil/kfRu8JZav9/RZpxLZMz5c32We3xePnDUcSpFYhFdF8MILLc8YaRZ6xRRT +hwQb1w9x0YvrtUqEE1k8UmSKP0P+H4LfunGp6uTU2zOTHulHQe2VCaARuRO6K3HG +lDT9qQZ7HVxF+4vuLPMv5pFwjVoxi4LK9NBL0bJ1VoaSQ/uqOjYxdv00bo1gb9I4 +fdRskEjVssBI4Aie5ml/CShML1Unys6hLtvmsY+5pFcXRhgoPRPJDVghkqqe7rJh +X/zTeUPmGZPiLyL4CMCPoQ== 
+-----END ENCRYPTED PRIVATE KEY----- diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/upload.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/upload.go new file mode 100644 index 000000000..eaa170a8f --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/upload.go @@ -0,0 +1,271 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "sync" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// mutexedProgress is a thread-safe structure to update and report progress during an UploadTask. +// +// Value must be read/written using LockedGet/LockedSet values instead of directly accessing the `progress` variable +type mutexedProgress struct { + progress float64 + sync.Mutex +} + +func (p *mutexedProgress) LockedSet(progress float64) { + p.Lock() + defer p.Unlock() + p.progress = progress +} + +func (p *mutexedProgress) LockedGet() float64 { + p.Lock() + defer p.Unlock() + return p.progress +} + +// uploadLink - vCD created temporary upload link +// uploadedBytes - how much of file already uploaded +// fileSizeToUpload - how much bytes will be uploaded +// uploadPieceSize - size of chunks in which the file will be uploaded to the catalog. +// uploadedBytesForCallback all uploaded bytes if multi disk in ova +// allFilesSize overall sum of size if multi disk in ova +// callBack a function with signature //function(bytesUpload, totalSize) to let the caller monitor progress of the upload operation. +type uploadDetails struct { + uploadLink string + uploadedBytes, fileSizeToUpload, uploadPieceSize, uploadedBytesForCallback, allFilesSize int64 + callBack func(bytesUpload, totalSize int64) + uploadError *error +} + +// Upload file by parts which size is defined by user provided variable uploadPieceSize and +// provides how much bytes uploaded to callback. Callback allows to monitor upload progress. +// params: +// client - client for requests +// filePath - file path to file which will be uploaded +// uploadDetails - file upload settings and data +func uploadFile(client *Client, filePath string, uDetails uploadDetails) (int64, error) { + util.Logger.Printf("[TRACE] Starting uploading: %s, offset: %v, fileze: %v, toLink: %s \n", filePath, uDetails.uploadedBytes, uDetails.fileSizeToUpload, uDetails.uploadLink) + + var part []byte + var count int + var pieceSize int64 + + // #nosec G304 - linter does not like 'filePath' to be a variable. However this is necessary for file uploads. + file, err := os.Open(filePath) + if err != nil { + util.Logger.Printf("[ERROR] during upload process - file open issue : %s, error %s ", filePath, err) + *uDetails.uploadError = err + return 0, err + } + + fileInfo, err := file.Stat() + if err != nil { + util.Logger.Printf("[ERROR] during upload process - file issue : %s, error %s ", filePath, err) + *uDetails.uploadError = err + return 0, err + } + + defer file.Close() + + fileSize := fileInfo.Size() + // when file size in OVF does not exist, use real file size instead + if uDetails.fileSizeToUpload == -1 { + uDetails.fileSizeToUpload = fileSize + uDetails.allFilesSize += fileSize + } + // TODO: file size in OVF maybe wrong? how to handle that? 
+ if uDetails.fileSizeToUpload != fileSize { + fmt.Printf("WARNING:file size %d in OVF is not align with real file size %d, upload task may hung.\n", + uDetails.fileSizeToUpload, fileSize) + } + + // do not allow smaller than 1kb + if uDetails.uploadPieceSize > 1024 && uDetails.uploadPieceSize < uDetails.fileSizeToUpload { + pieceSize = uDetails.uploadPieceSize + } else { + pieceSize = defaultPieceSize + } + + util.Logger.Printf("[TRACE] Uploading will use piece size: %#v \n", pieceSize) + part = make([]byte, pieceSize) + + for { + if count, err = io.ReadFull(file, part); err != nil { + break + } + err = uploadPartFile(client, part, int64(count), uDetails) + uDetails.uploadedBytes += int64(count) + uDetails.uploadedBytesForCallback += int64(count) + if err != nil { + util.Logger.Printf("[ERROR] during upload process: %s, error %s ", filePath, err) + *uDetails.uploadError = err + return 0, err + } + } + + // upload last part as ReadFull returns io.ErrUnexpectedEOF when reaches end of file. + if err == io.ErrUnexpectedEOF { + err = uploadPartFile(client, part[:count], int64(count), uDetails) + if err != nil { + util.Logger.Printf("[ERROR] during upload process: %s, error %s ", filePath, err) + *uDetails.uploadError = err + return 0, err + } + } else { + util.Logger.Printf("Error Uploading: %s, error %s ", filePath, err) + *uDetails.uploadError = err + return 0, err + } + + return fileSize, nil +} + +// Create Request with right headers and range settings. Support multi part file upload. +// client - client for requests +// requestUrl - upload url +// filePart - bytes to upload +// offset - how much is uploaded +// filePartSize - how much bytes will be uploaded +// fileSizeToUpload - final file size +func newFileUploadRequest(client *Client, requestUrl string, filePart []byte, offset, filePartSize, fileSizeToUpload int64) (*http.Request, error) { + util.Logger.Printf("[TRACE] Creating file upload request: %s, %v, %v, %v \n", requestUrl, offset, filePartSize, fileSizeToUpload) + + parsedRequestURL, err := url.ParseRequestURI(requestUrl) + if err != nil { + return nil, fmt.Errorf("error decoding vdc response: %s", err) + } + + uploadReq := client.NewRequestWitNotEncodedParams(nil, nil, http.MethodPut, *parsedRequestURL, bytes.NewReader(filePart)) + + uploadReq.ContentLength = filePartSize + uploadReq.Header.Set("Content-Length", strconv.FormatInt(uploadReq.ContentLength, 10)) + + rangeExpression := "bytes " + strconv.FormatInt(int64(offset), 10) + "-" + strconv.FormatInt(int64(offset+filePartSize-1), 10) + "/" + strconv.FormatInt(int64(fileSizeToUpload), 10) + uploadReq.Header.Set("Content-Range", rangeExpression) + + for key, value := range uploadReq.Header { + util.Logger.Printf("[TRACE] Header: %s :%s \n", key, value) + } + + return uploadReq, nil +} + +// Initiates file part upload by creating request and running it. +// params: +// client - client for requests +// part - bytes of file part +// partDataSize - how much bytes will be uploaded +// uploadDetails - file upload settings and data +func uploadPartFile(client *Client, part []byte, partDataSize int64, uDetails uploadDetails) error { + // Avoids session time out, as the multi part upload is treated as one request + makeEmptyRequest(client) + request, err := newFileUploadRequest(client, uDetails.uploadLink, part, uDetails.uploadedBytes, partDataSize, uDetails.fileSizeToUpload) + if err != nil { + return err + } + + response, err := checkResp(client.Http.Do(request)) + if err != nil { + return fmt.Errorf("file upload failed. 
Err: %s", err) + } + err = response.Body.Close() + if err != nil { + return fmt.Errorf("file closing failed. Err: %s", err) + } + + uDetails.callBack(uDetails.uploadedBytesForCallback+partDataSize, uDetails.allFilesSize) + + return nil +} + +// call query for task which are very fast and optimised as UI calls it very often +func makeEmptyRequest(client *Client) { + apiEndpoint := client.VCDHREF + apiEndpoint.Path += "/query?type=task&format=records&page=1&pageSize=5&" + + _, err := client.ExecuteRequest(apiEndpoint.String(), http.MethodGet, + "", "error making empty request: %s", nil, nil) + if err != nil { + util.Logger.Printf("[DEBUG - makeEmptyRequest] error executing request: %s", err) + } +} + +func getUploadLink(files *types.FilesList) (*url.URL, error) { + util.Logger.Printf("[TRACE] getUploadLink - Parsing upload link: %#v\n", files) + + if len(files.File) > 1 { + return nil, errors.New("unexpected response from vCD: found more than one link for upload") + } + + ovfUploadHref, err := url.ParseRequestURI(files.File[0].Link[0].HREF) + if err != nil { + return nil, err + } + + util.Logger.Printf("[TRACE] getUploadLink- upload link found: %#v\n", ovfUploadHref) + return ovfUploadHref, nil +} + +func createTaskForVcdImport(client *Client, taskHREF string) (Task, error) { + util.Logger.Printf("[TRACE] Create task for vcd with HREF: %s\n", taskHREF) + + taskURL, err := url.ParseRequestURI(taskHREF) + if err != nil { + return Task{}, err + } + + request := client.NewRequest(map[string]string{}, http.MethodGet, *taskURL, nil) + response, err := checkResp(client.Http.Do(request)) + if err != nil { + return Task{}, err + } + + task := NewTask(client) + + if err = decodeBody(types.BodyTypeXML, response, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding Task response: %s", err) + } + + // The request was successful + return *task, nil +} + +func getProgressCallBackFunction() (func(int64, int64), *mutexedProgress) { + uploadProgress := &mutexedProgress{} + callback := func(bytesUploaded, totalSize int64) { + uploadProgress.LockedSet((float64(bytesUploaded) / float64(totalSize)) * 100) + } + return callback, uploadProgress +} + +func validateAndFixFilePath(file string) (string, error) { + absolutePath, err := filepath.Abs(file) + if err != nil { + return "", err + } + fileInfo, err := os.Stat(absolutePath) + if os.IsNotExist(err) { + return "", err + } + if fileInfo.Size() == 0 { + return "", errors.New("file is empty") + } + return absolutePath, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/uploadtask.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/uploadtask.go new file mode 100644 index 000000000..66bc87568 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/uploadtask.go @@ -0,0 +1,60 @@ +/* + * Copyright 2018 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "time" +) + +type UploadTask struct { + uploadProgress *mutexedProgress + *Task + uploadError *error +} + +// Creates wrapped Task which is dedicated for upload functionality and +// provides additional functionality to monitor upload progress. 
+func NewUploadTask(task *Task, uploadProgress *mutexedProgress, uploadError *error) *UploadTask { + return &UploadTask{ + uploadProgress, + task, + uploadError, + } +} + +func (uploadTask *UploadTask) GetUploadProgress() string { + return fmt.Sprintf("%.2f", uploadTask.uploadProgress.LockedGet()) +} + +func (uploadTask *UploadTask) ShowUploadProgress() error { + fmt.Printf("Waiting...") + + for { + if *uploadTask.uploadError != nil { + return *uploadTask.uploadError + } + + fmt.Printf("\rUpload progress %.2f%%", uploadTask.uploadProgress.LockedGet()) + if uploadTask.uploadProgress.LockedGet() == 100.00 { + fmt.Println() + break + } + // Upload may be cancelled by user on GUI manually, detect task status + if err := uploadTask.Refresh(); err != nil { + return err + } + if uploadTask.Task.Task.Status != "queued" && uploadTask.Task.Task.Status != "preRunning" && uploadTask.Task.Task.Status != "running" { + fmt.Println() + break + } + time.Sleep(1 * time.Second) + } + return nil +} + +func (uploadTask *UploadTask) GetUploadError() error { + return *uploadTask.uploadError +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/user.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/user.go new file mode 100644 index 000000000..d1678025a --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/user.go @@ -0,0 +1,543 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "net/http" + "net/url" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +// Definition of an OrgUser +type OrgUser struct { + User *types.User + client *Client + AdminOrg *AdminOrg // needed to be able to update, as the list of roles is found in the Org +} + +// Simplified structure to insert or modify an organization user +type OrgUserConfiguration struct { + Name string // Mandatory + Password string // Mandatory + RoleName string // Mandatory + ProviderType string // Optional: defaults to "INTEGRATED" + IsEnabled bool // Optional: defaults to false + IsLocked bool // Only used for updates + DeployedVmQuota int // Optional: 0 means "unlimited" + StoredVmQuota int // Optional: 0 means "unlimited" + FullName string // Optional + Description string // Optional + EmailAddress string // Optional + Telephone string // Optional + IM string // Optional +} + +const ( + // Common role names and provider types are kept here to reduce hard-coded text and prevent mistakes + // Roles that are added to the organization need to be entered as free text + + OrgUserRoleOrganizationAdministrator = "Organization Administrator" + OrgUserRoleCatalogAuthor = "Catalog Author" + OrgUserRoleVappAuthor = "vApp Author" + OrgUserRoleVappUser = "vApp User" + OrgUserRoleConsoleAccessOnly = "Console Access Only" + OrgUserRoleDeferToIdentityProvider = "Defer to Identity Provider" + + // Allowed values for provider types + OrgUserProviderIntegrated = "INTEGRATED" // The user is created locally or imported from LDAP + OrgUserProviderSAML = "SAML" // The user is imported from a SAML identity provider. 
+ OrgUserProviderOAUTH = "OAUTH" // The user is imported from an OAUTH identity provider +) + +// Used to check the validity of provider type on creation +var OrgUserProviderTypes = []string{ + OrgUserProviderIntegrated, + OrgUserProviderSAML, + OrgUserProviderOAUTH, +} + +// NewUser creates an empty user +func NewUser(cli *Client, org *AdminOrg) *OrgUser { + return &OrgUser{ + User: new(types.User), + client: cli, + AdminOrg: org, + } +} + +// FetchUserByHref returns a user by its HREF +// Deprecated: use GetUserByHref instead +func (adminOrg *AdminOrg) FetchUserByHref(href string) (*OrgUser, error) { + return adminOrg.GetUserByHref(href) +} + +// FetchUserByName returns a user by its Name +// Deprecated: use GetUserByName instead +func (adminOrg *AdminOrg) FetchUserByName(name string, refresh bool) (*OrgUser, error) { + return adminOrg.GetUserByName(name, refresh) +} + +// FetchUserById returns a user by its ID +// Deprecated: use GetUserById instead +func (adminOrg *AdminOrg) FetchUserById(id string, refresh bool) (*OrgUser, error) { + return adminOrg.GetUserById(id, refresh) +} + +// FetchUserById returns a user by its Name or ID +// Deprecated: use GetUserByNameOrId instead +func (adminOrg *AdminOrg) FetchUserByNameOrId(identifier string, refresh bool) (*OrgUser, error) { + return adminOrg.GetUserByNameOrId(identifier, refresh) +} + +// GetUserByHref returns a user by its HREF, without need for +// searching in the adminOrg user list +func (adminOrg *AdminOrg) GetUserByHref(href string) (*OrgUser, error) { + orgUser := NewUser(adminOrg.client, adminOrg) + + _, err := adminOrg.client.ExecuteRequest(href, http.MethodGet, + types.MimeAdminUser, "error getting user: %s", nil, orgUser.User) + + if err != nil { + return nil, err + } + return orgUser, nil +} + +// GetUserByName retrieves a user within an admin organization by name +// Returns a valid user if it exists. If it doesn't, returns nil and ErrorEntityNotFound +// If argument refresh is true, the AdminOrg will be refreshed before searching. +// This is usually done after creating, modifying, or deleting users. +// If it is false, it will search within the data already in memory (useful when +// looping through the users and we know that no changes have occurred in the meantime) +func (adminOrg *AdminOrg) GetUserByName(name string, refresh bool) (*OrgUser, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + + for _, user := range adminOrg.AdminOrg.Users.User { + if user.Name == name { + return adminOrg.GetUserByHref(user.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetUserById retrieves a user within an admin organization by ID +// Returns a valid user if it exists. If it doesn't, returns nil and ErrorEntityNotFound +// If argument refresh is true, the AdminOrg will be refreshed before searching. +// This is usually done after creating, modifying, or deleting users. 
+// If it is false, it will search within the data already in memory (useful when +// looping through the users and we know that no changes have occurred in the meantime) +func (adminOrg *AdminOrg) GetUserById(id string, refresh bool) (*OrgUser, error) { + if refresh { + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + } + + for _, user := range adminOrg.AdminOrg.Users.User { + if equalIds(id, user.ID, user.HREF) { + return adminOrg.GetUserByHref(user.HREF) + } + } + return nil, ErrorEntityNotFound +} + +// GetUserByNameOrId retrieves a user within an admin organization +// by either name or ID +// Returns a valid user if it exists. If it doesn't, returns nil and ErrorEntityNotFound +// If argument refresh is true, the AdminOrg will be refreshed before searching. +// This is usually done after creating, modifying, or deleting users. +// If it is false, it will search within the data already in memory (useful when +// looping through the users and we know that no changes have occurred in the meantime) +func (adminOrg *AdminOrg) GetUserByNameOrId(identifier string, refresh bool) (*OrgUser, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return adminOrg.GetUserByName(name, refresh) } + getById := func(name string, refresh bool) (interface{}, error) { return adminOrg.GetUserById(name, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, refresh) + if entity == nil { + return nil, err + } + return entity.(*OrgUser), err +} + +// GetRoleReference finds a role within the organization +func (adminOrg *AdminOrg) GetRoleReference(roleName string) (*types.Reference, error) { + + // We force refresh of the organization, to make sure that roles recently created + // are taken into account. + // This will become unnecessary when we refactor the User management with OpenAPI + err := adminOrg.Refresh() + if err != nil { + return nil, err + } + for _, role := range adminOrg.AdminOrg.RoleReferences.RoleReference { + if role.Name == roleName { + return role, nil + } + } + + return nil, ErrorEntityNotFound +} + +// Retrieves a user within the boundaries of MaxRetryTimeout +func retrieveUserWithTimeout(adminOrg *AdminOrg, userName string) (*OrgUser, error) { + + // Attempting to retrieve the user + delayPerAttempt := 200 * time.Millisecond + maxOperationTimeout := time.Duration(adminOrg.client.MaxRetryTimeout) * time.Second + + // We make sure that the timeout is never less than 2 seconds + if maxOperationTimeout < 2*time.Second { + maxOperationTimeout = 2 * time.Second + } + + // If maxRetryTimeout is set to a higher limit, we lower it to match the + // expectations for this operation. If the user is not created within 10 seconds, + // there is no need to wait for more. 
Usually, the operation lasts between 200ms and 900ms + if maxOperationTimeout > 10*time.Second { + maxOperationTimeout = 10 * time.Second + } + + startTime := time.Now() + elapsed := time.Since(startTime) + var newUser *OrgUser + var err error + for elapsed < maxOperationTimeout { + newUser, err = adminOrg.GetUserByName(userName, true) + if err == nil { + break + } + time.Sleep(delayPerAttempt) + elapsed = time.Since(startTime) + } + + elapsed = time.Since(startTime) + + // If the user was not retrieved within the allocated time, we inform the user about the failure + // and the time it occurred to get to this point, so that they may try with a longer time + if err != nil { + return nil, fmt.Errorf("failure to retrieve a new user after %s : %s", elapsed, err) + } + + return newUser, nil +} + +// CreateUser creates an OrgUser from a full configuration structure +// The timeOut variable is the maximum time we wait for the user to be ready +// (This operation does not return a task) +// This function returns as soon as the user has been created, which could be as +// little as 200ms or as much as Client.MaxRetryTimeout +// Mandatory fields are: Name, Role, Password. +// https://code.vmware.com/apis/442/vcloud-director#/doc/doc/operations/POST-CreateUser.html +func (adminOrg *AdminOrg) CreateUser(userConfiguration *types.User) (*OrgUser, error) { + err := validateUserForCreation(userConfiguration) + if err != nil { + return nil, err + } + + userCreateHREF, err := url.ParseRequestURI(adminOrg.AdminOrg.HREF) + if err != nil { + return nil, fmt.Errorf("error parsing admin org url: %s", err) + } + userCreateHREF.Path += "/users" + + user := NewUser(adminOrg.client, adminOrg) + + _, err = adminOrg.client.ExecuteRequest(userCreateHREF.String(), http.MethodPost, + types.MimeAdminUser, "error creating user: %s", userConfiguration, user.User) + if err != nil { + return nil, err + } + + // If there is a valid task, we try to follow through + // A valid task exists if the Task object in the user structure + // is not nil and contains at least a task + if user.User.Tasks != nil && len(user.User.Tasks.Task) > 0 { + task := NewTask(adminOrg.client) + task.Task = user.User.Tasks.Task[0] + err = task.WaitTaskCompletion() + + if err != nil { + return nil, err + } + } + + return retrieveUserWithTimeout(adminOrg, userConfiguration.Name) +} + +// CreateUserSimple creates an org user from a simplified structure +func (adminOrg *AdminOrg) CreateUserSimple(userData OrgUserConfiguration) (*OrgUser, error) { + + if userData.Name == "" { + return nil, fmt.Errorf("name is mandatory to create a user") + } + if userData.Password == "" { + return nil, fmt.Errorf("password is mandatory to create a user") + } + if userData.RoleName == "" { + return nil, fmt.Errorf("role is mandatory to create a user") + } + role, err := adminOrg.GetRoleReference(userData.RoleName) + if err != nil { + return nil, fmt.Errorf("error finding a role named %s", userData.RoleName) + } + + var userConfiguration = types.User{ + Xmlns: types.XMLNamespaceVCloud, + Type: types.MimeAdminUser, + ProviderType: userData.ProviderType, + Name: userData.Name, + IsEnabled: userData.IsEnabled, + Password: userData.Password, + DeployedVmQuota: userData.DeployedVmQuota, + StoredVmQuota: userData.StoredVmQuota, + FullName: userData.FullName, + EmailAddress: userData.EmailAddress, + Description: userData.Description, + Telephone: userData.Telephone, + IM: userData.IM, + Role: &types.Reference{HREF: role.HREF}, + } + + // ShowUser(userConfiguration) + return 
adminOrg.CreateUser(&userConfiguration) +} + +// GetRoleName retrieves the name of the role currently assigned to the user +func (user *OrgUser) GetRoleName() string { + if user.User.Role == nil { + return "" + } + return user.User.Role.Name +} + +// Delete removes the user, returning an error if the call fails. +// if requested, it will attempt to take ownership before the removal. +// API Documentation: https://code.vmware.com/apis/442/vcloud-director#/doc/doc/operations/DELETE-User.html +// Note: in the GUI we need to disable the user before deleting. +// There is no such constraint with the API. +// +// Expected behaviour: +// with takeOwnership = true, all entities owned by the user being deleted will be transferred to the caller. +// with takeOwnership = false, if the user own catalogs, networks, or running VMs/vApps, the call will fail. +// If the user owns only powered-off VMs/vApps, the call will succeeds and the +// VMs/vApps will be removed. +func (user *OrgUser) Delete(takeOwnership bool) error { + util.Logger.Printf("[TRACE] Deleting user: %#v (take ownership: %v)", user.User.Name, takeOwnership) + + if takeOwnership { + err := user.TakeOwnership() + if err != nil { + return err + } + } + + userHREF, err := url.ParseRequestURI(user.User.Href) + if err != nil { + return fmt.Errorf("error getting HREF for user %s : %s", user.User.Name, err) + } + util.Logger.Printf("[TRACE] Url for deleting user : %#v and name: %s", userHREF, user.User.Name) + + return user.client.ExecuteRequestWithoutResponse(userHREF.String(), http.MethodDelete, + types.MimeAdminUser, "error deleting user : %s", nil) +} + +// UpdateSimple updates the user, using ALL the fields in userData structure +// returning an error if the call fails. +// Careful: DeployedVmQuota and StoredVmQuota use a `0` value to mean "unlimited" +func (user *OrgUser) UpdateSimple(userData OrgUserConfiguration) error { + util.Logger.Printf("[TRACE] Updating user: %#v", user.User.Name) + + if userData.Name != "" { + user.User.Name = userData.Name + } + if userData.ProviderType != "" { + user.User.ProviderType = userData.ProviderType + } + if userData.Description != "" { + user.User.Description = userData.Description + } + if userData.FullName != "" { + user.User.FullName = userData.FullName + } + if userData.EmailAddress != "" { + user.User.EmailAddress = userData.EmailAddress + } + if userData.Telephone != "" { + user.User.Telephone = userData.Telephone + } + if userData.Password != "" { + user.User.Password = userData.Password + } + user.User.StoredVmQuota = userData.StoredVmQuota + user.User.DeployedVmQuota = userData.DeployedVmQuota + user.User.IsEnabled = userData.IsEnabled + user.User.IsLocked = userData.IsLocked + + if userData.RoleName != "" && user.User.Role != nil && user.User.Role.Name != userData.RoleName { + newRole, err := user.AdminOrg.GetRoleReference(userData.RoleName) + if err != nil { + return err + } + user.User.Role = newRole + } + return user.Update() +} + +// Update updates the user, using its own configuration data +// returning an error if the call fails. +// API Documentation: https://code.vmware.com/apis/442/vcloud-director#/doc/doc/operations/PUT-User.html +func (user *OrgUser) Update() error { + util.Logger.Printf("[TRACE] Updating user: %s", user.User.Name) + + // Makes sure that GroupReferences is either properly filled or nil, + // because otherwise vCD will complain that the payload is not well formatted when + // the configuration contains a non-empty password. 
+ if user.User.GroupReferences != nil { + if len(user.User.GroupReferences.GroupReference) == 0 { + user.User.GroupReferences = nil + } + } + + userHREF, err := url.ParseRequestURI(user.User.Href) + if err != nil { + return fmt.Errorf("error getting HREF for user %s : %s", user.User.Name, err) + } + util.Logger.Printf("[TRACE] Url for updating user : %#v and name: %s", userHREF, user.User.Name) + + _, err = user.client.ExecuteRequest(userHREF.String(), http.MethodPut, + types.MimeAdminUser, "error updating user : %s", user.User, nil) + return err +} + +// Disable disables a user, if it is enabled. Fails otherwise. +func (user *OrgUser) Disable() error { + util.Logger.Printf("[TRACE] Disabling user: %s", user.User.Name) + + if !user.User.IsEnabled { + return fmt.Errorf("user %s is already disabled", user.User.Name) + } + user.User.IsEnabled = false + + return user.Update() +} + +// ChangePassword changes user's password +// Constraints: the password must be non-empty, with a minimum of 6 characters +func (user *OrgUser) ChangePassword(newPass string) error { + util.Logger.Printf("[TRACE] Changing user's password user: %s", user.User.Name) + + user.User.Password = newPass + + return user.Update() +} + +// Enable enables a user if it was disabled. Fails otherwise. +func (user *OrgUser) Enable() error { + util.Logger.Printf("[TRACE] Enabling user: %s", user.User.Name) + + if user.User.IsEnabled { + return fmt.Errorf("user %s is already enabled", user.User.Name) + } + user.User.IsEnabled = true + + return user.Update() +} + +// Unlock unlocks a user that was locked out by the system. +// Note that there is no procedure to LOCK a user: it is locked by the system when it exceeds the number of +// unauthorized access attempts +func (user *OrgUser) Unlock() error { + util.Logger.Printf("[TRACE] Unlocking user: %s", user.User.Name) + + if !user.User.IsLocked { + return fmt.Errorf("user %s is not locked", user.User.Name) + } + user.User.IsLocked = false + + return user.Update() +} + +// ChangeRole changes a user's role +// Fails is we try to set the same role as the current one. +// Also fails if the provided role name is not found. +func (user *OrgUser) ChangeRole(roleName string) error { + util.Logger.Printf("[TRACE] Changing user's role: %s", user.User.Name) + + if roleName == "" { + return fmt.Errorf("role name cannot be empty") + } + + if user.User.Role != nil && user.User.Role.Name == roleName { + return fmt.Errorf("new role is the same as current role") + } + + newRole, err := user.AdminOrg.GetRoleReference(roleName) + if err != nil { + return err + } + user.User.Role = newRole + + return user.Update() +} + +// TakeOwnership takes ownership of the user's objects. +// Ownership is transferred to the caller. +// This is a call to make before deleting. Calling user.DeleteTakeOwnership() will +// run TakeOwnership before the actual user removal. 
+// API Documentation: https://code.vmware.com/apis/442/vcloud-director#/doc/doc/operations/POST-TakeOwnership.html +func (user *OrgUser) TakeOwnership() error { + util.Logger.Printf("[TRACE] Taking ownership from user: %s", user.User.Name) + + userHREF, err := url.ParseRequestURI(user.User.Href + "/action/takeOwnership") + if err != nil { + return fmt.Errorf("error getting HREF for user %s : %s", user.User.Name, err) + } + util.Logger.Printf("[TRACE] Url for taking ownership from user : %#v and name: %s", userHREF, user.User.Name) + + return user.client.ExecuteRequestWithoutResponse(userHREF.String(), http.MethodPost, + types.MimeAdminUser, "error taking ownership from user : %s", nil) +} + +// validateUserForInput makes sure that the minimum data +// needed for creating an org user has been included in the configuration +func validateUserForCreation(user *types.User) error { + var missingField = "missing field %s" + if user.Xmlns == "" { + user.Xmlns = types.XMLNamespaceVCloud + } + if user.Type == "" { + user.Type = types.MimeAdminUser + } + if user.Name == "" { + return fmt.Errorf(missingField, "Name") + } + if user.Password == "" { + return fmt.Errorf(missingField, "Password") + } + if user.ProviderType != "" { + validProviderType := false + for _, pt := range OrgUserProviderTypes { + if user.ProviderType == pt { + validProviderType = true + } + } + if !validProviderType { + return fmt.Errorf("'%s' is not a valid provider type", user.ProviderType) + } + } + if user.Role.HREF == "" { + return fmt.Errorf(missingField, "Role.HREF") + } + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vapp.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vapp.go new file mode 100644 index 000000000..337304805 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vapp.go @@ -0,0 +1,1531 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "encoding/xml" + "errors" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type VApp struct { + VApp *types.VApp + client *Client +} + +func NewVApp(cli *Client) *VApp { + return &VApp{ + VApp: new(types.VApp), + client: cli, + } +} + +func (vcdClient *VCDClient) NewVApp(client *Client) VApp { + newvapp := NewVApp(client) + return *newvapp +} + +// struct type used to pass information for vApp network creation +type VappNetworkSettings struct { + ID string + Name string + Description string + Gateway string + NetMask string + DNS1 string + DNS2 string + DNSSuffix string + GuestVLANAllowed *bool + StaticIPRanges []*types.IPRange + DhcpSettings *DhcpSettings + RetainIpMacEnabled *bool + VappFenceEnabled *bool +} + +// struct type used to pass information for vApp network DHCP +type DhcpSettings struct { + IsEnabled bool + MaxLeaseTime int + DefaultLeaseTime int + IPRange *types.IPRange +} + +// Returns the vdc where the vapp resides in. 
+func (vapp *VApp) getParentVDC() (Vdc, error) { + for _, link := range vapp.VApp.Link { + if (link.Type == types.MimeVDC || link.Type == types.MimeAdminVDC) && link.Rel == "up" { + + vdc := NewVdc(vapp.client) + + _, err := vapp.client.ExecuteRequest(link.HREF, http.MethodGet, + "", "error retrieving parent vdc: %s", nil, vdc.Vdc) + if err != nil { + return Vdc{}, err + } + + parent, err := vdc.getParentOrg() + if err != nil { + return Vdc{}, err + } + vdc.parent = parent + return *vdc, nil + } + } + return Vdc{}, fmt.Errorf("could not find a parent Vdc") +} + +func (vapp *VApp) Refresh() error { + + if vapp.VApp.HREF == "" { + return fmt.Errorf("cannot refresh, Object is empty") + } + + url := vapp.VApp.HREF + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. + vapp.VApp = &types.VApp{} + + _, err := vapp.client.ExecuteRequest(url, http.MethodGet, + "", "error refreshing vApp: %s", nil, vapp.VApp) + + // The request was successful + return err +} + +// AddVM create vm in vApp using vApp template +// orgVdcNetworks - adds org VDC networks to be available for vApp. Can be empty. +// vappNetworkName - adds vApp network to be available for vApp. Can be empty. +// vappTemplate - vApp Template which will be used for VM creation. +// name - name for VM. +// acceptAllEulas - setting allows to automatically accept or not Eulas. +// +// Deprecated: Use vapp.AddNewVM instead for more sophisticated network handling +func (vapp *VApp) AddVM(orgVdcNetworks []*types.OrgVDCNetwork, vappNetworkName string, vappTemplate VAppTemplate, name string, acceptAllEulas bool) (Task, error) { + util.Logger.Printf("[INFO] vapp.AddVM() is deprecated in favor of vapp.AddNewVM()") + if vappTemplate == (VAppTemplate{}) || vappTemplate.VAppTemplate == nil { + return Task{}, fmt.Errorf("vApp Template can not be empty") + } + + // primaryNetworkConnectionIndex will be inherited from template or defaulted to 0 + // if the template does not have any NICs assigned. 
+ primaryNetworkConnectionIndex := 0 + if vappTemplate.VAppTemplate.Children != nil && len(vappTemplate.VAppTemplate.Children.VM) > 0 && + vappTemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection != nil { + primaryNetworkConnectionIndex = vappTemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.PrimaryNetworkConnectionIndex + } + + networkConnectionSection := types.NetworkConnectionSection{ + Info: "Network config for sourced item", + PrimaryNetworkConnectionIndex: primaryNetworkConnectionIndex, + } + + for index, orgVdcNetwork := range orgVdcNetworks { + networkConnectionSection.NetworkConnection = append(networkConnectionSection.NetworkConnection, + &types.NetworkConnection{ + Network: orgVdcNetwork.Name, + NetworkConnectionIndex: index, + IsConnected: true, + IPAddressAllocationMode: types.IPAllocationModePool, + }, + ) + } + + if vappNetworkName != "" { + networkConnectionSection.NetworkConnection = append(networkConnectionSection.NetworkConnection, + &types.NetworkConnection{ + Network: vappNetworkName, + NetworkConnectionIndex: len(orgVdcNetworks), + IsConnected: true, + IPAddressAllocationMode: types.IPAllocationModePool, + }, + ) + } + + return vapp.AddNewVM(name, vappTemplate, &networkConnectionSection, acceptAllEulas) +} + +// AddNewVM adds VM from vApp template with custom NetworkConnectionSection +func (vapp *VApp) AddNewVM(name string, vappTemplate VAppTemplate, network *types.NetworkConnectionSection, acceptAllEulas bool) (Task, error) { + return vapp.AddNewVMWithStorageProfile(name, vappTemplate, network, nil, acceptAllEulas) +} + +// AddNewVMWithStorageProfile adds VM from vApp template with custom NetworkConnectionSection and optional storage profile +func (vapp *VApp) AddNewVMWithStorageProfile(name string, vappTemplate VAppTemplate, + network *types.NetworkConnectionSection, + storageProfileRef *types.Reference, acceptAllEulas bool) (Task, error) { + return addNewVMW(vapp, name, vappTemplate, network, storageProfileRef, nil, acceptAllEulas) +} + +// AddNewVMWithComputePolicy adds VM from vApp template with custom NetworkConnectionSection and optional storage profile +// and compute policy +func (vapp *VApp) AddNewVMWithComputePolicy(name string, vappTemplate VAppTemplate, + network *types.NetworkConnectionSection, + storageProfileRef *types.Reference, computePolicy *types.VdcComputePolicy, acceptAllEulas bool) (Task, error) { + return addNewVMW(vapp, name, vappTemplate, network, storageProfileRef, computePolicy, acceptAllEulas) +} + +// addNewVMW adds VM from vApp template with custom NetworkConnectionSection and optional storage profile +// and optional compute policy +func addNewVMW(vapp *VApp, name string, vappTemplate VAppTemplate, + network *types.NetworkConnectionSection, + storageProfileRef *types.Reference, computePolicy *types.VdcComputePolicy, acceptAllEulas bool) (Task, error) { + + if vappTemplate == (VAppTemplate{}) || vappTemplate.VAppTemplate == nil { + return Task{}, fmt.Errorf("vApp Template can not be empty") + } + + templateHref := vappTemplate.VAppTemplate.HREF + if vappTemplate.VAppTemplate.Children != nil && len(vappTemplate.VAppTemplate.Children.VM) != 0 { + templateHref = vappTemplate.VAppTemplate.Children.VM[0].HREF + } + + // Status 8 means The object is resolved and powered off. 
+ // https://vdc-repo.vmware.com/vmwb-repository/dcr-public/94b8bd8d-74ff-4fe3-b7a4-41ae31516ed7/1b42f3b5-8b31-4279-8b3f-547f6c7c5aa8/doc/GUID-843BE3AD-5EF6-4442-B864-BCAE44A51867.html + if vappTemplate.VAppTemplate.Status != 8 { + return Task{}, fmt.Errorf("vApp Template shape is not ok (status: %d)", vappTemplate.VAppTemplate.Status) + } + + // Validate network config only if it was supplied + if network != nil && network.NetworkConnection != nil { + for _, nic := range network.NetworkConnection { + if nic.Network == "" { + return Task{}, fmt.Errorf("missing mandatory attribute Network: %s", nic.Network) + } + if nic.IPAddressAllocationMode == "" { + return Task{}, fmt.Errorf("missing mandatory attribute IPAddressAllocationMode: %s", nic.IPAddressAllocationMode) + } + } + } + + vAppComposition := &types.ReComposeVAppParams{ + Ovf: types.XMLNamespaceOVF, + Xsi: types.XMLNamespaceXSI, + Xmlns: types.XMLNamespaceVCloud, + Deploy: false, + Name: vapp.VApp.Name, + PowerOn: false, + Description: vapp.VApp.Description, + SourcedItem: &types.SourcedCompositionItemParam{ + Source: &types.Reference{ + HREF: templateHref, + Name: name, + }, + InstantiationParams: &types.InstantiationParams{}, // network config is injected below + }, + AllEULAsAccepted: acceptAllEulas, + } + + // Add storage profile + if storageProfileRef != nil && storageProfileRef.HREF != "" { + vAppComposition.SourcedItem.StorageProfile = storageProfileRef + } + + // Add compute policy + if computePolicy != nil && computePolicy.ID != "" { + vdcComputePolicyHref, err := vapp.client.OpenApiBuildEndpoint(types.OpenApiPathVersion1_0_0, types.OpenApiEndpointVdcComputePolicies, computePolicy.ID) + if err != nil { + return Task{}, fmt.Errorf("error constructing HREF for compute policy") + } + vAppComposition.SourcedItem.ComputePolicy = &types.ComputePolicy{VmSizingPolicy: &types.Reference{HREF: vdcComputePolicyHref.String()}} + } + + // Inject network config + vAppComposition.SourcedItem.InstantiationParams.NetworkConnectionSection = network + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/action/recomposeVApp" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + types.MimeRecomposeVappParams, "error instantiating a new VM: %s", vAppComposition) + +} + +// ========================= issue#252 ================================== +// TODO: To be refactored, handling networks better. See issue#252 for details +// https://github.com/vmware/go-vcloud-director/issues/252 +// ====================================================================== +func (vapp *VApp) RemoveVM(vm VM) error { + err := vapp.Refresh() + if err != nil { + return fmt.Errorf("error refreshing vApp before removing VM: %s", err) + } + task := NewTask(vapp.client) + if vapp.VApp.Tasks != nil { + for _, taskItem := range vapp.VApp.Tasks.Task { + task.Task = taskItem + // Leftover tasks may have unhandled errors that can be dismissed at this stage + // we complete any incomplete tasks at this stage, to finish the refresh. 
+ if task.Task.Status != "error" && task.Task.Status != "success" { + err := task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error performing task: %s", err) + } + } + } + } + + vcomp := &types.ReComposeVAppParams{ + Ovf: types.XMLNamespaceOVF, + Xsi: types.XMLNamespaceXSI, + Xmlns: types.XMLNamespaceVCloud, + DeleteItem: &types.DeleteItem{ + HREF: vm.VM.HREF, + }, + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/action/recomposeVApp" + + deleteTask, err := vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + types.MimeRecomposeVappParams, "error removing VM: %s", vcomp) + if err != nil { + return err + } + + err = deleteTask.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error performing removing VM task: %s", err) + } + + return nil +} + +func (vapp *VApp) PowerOn() (Task, error) { + + err := vapp.BlockWhileStatus("UNRESOLVED", vapp.client.MaxRetryTimeout) + if err != nil { + return Task{}, fmt.Errorf("error powering on vApp: %s", err) + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/power/action/powerOn" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "", "error powering on vApp: %s", nil) +} + +func (vapp *VApp) PowerOff() (Task, error) { + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/power/action/powerOff" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "", "error powering off vApp: %s", nil) + +} + +func (vapp *VApp) Reboot() (Task, error) { + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/power/action/reboot" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "", "error rebooting vApp: %s", nil) +} + +func (vapp *VApp) Reset() (Task, error) { + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/power/action/reset" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "", "error resetting vApp: %s", nil) +} + +func (vapp *VApp) Suspend() (Task, error) { + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/power/action/suspend" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "", "error suspending vApp: %s", nil) +} + +func (vapp *VApp) Shutdown() (Task, error) { + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/power/action/shutdown" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "", "error shutting down vApp: %s", nil) +} + +func (vapp *VApp) Undeploy() (Task, error) { + + vu := &types.UndeployVAppParams{ + Xmlns: types.XMLNamespaceVCloud, + UndeployPowerAction: "powerOff", + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/action/undeploy" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + types.MimeUndeployVappParams, "error undeploy vApp: %s", vu) +} + +func (vapp *VApp) Deploy() (Task, error) { + + vu := &types.DeployVAppParams{ + Xmlns: types.XMLNamespaceVCloud, + PowerOn: false, + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/action/deploy" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + types.MimeDeployVappParams, "error deploy vApp: 
%s", vu) +} + +func (vapp *VApp) Delete() (Task, error) { + + // Return the task + return vapp.client.ExecuteTaskRequest(vapp.VApp.HREF, http.MethodDelete, + "", "error deleting vApp: %s", nil) +} + +func (vapp *VApp) RunCustomizationScript(computername, script string) (Task, error) { + return vapp.Customize(computername, script, false) +} + +// Customize applies customization to first child VM +// +// Deprecated: Use vm.SetGuestCustomizationSection() +func (vapp *VApp) Customize(computername, script string, changeSid bool) (Task, error) { + err := vapp.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing vApp before running customization: %s", err) + } + + // Check if VApp Children is populated + if vapp.VApp.Children == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children, interrupting customization") + } + + vu := &types.GuestCustomizationSection{ + Ovf: types.XMLNamespaceOVF, + Xsi: types.XMLNamespaceXSI, + Xmlns: types.XMLNamespaceVCloud, + + HREF: vapp.VApp.Children.VM[0].HREF, + Type: types.MimeGuestCustomizationSection, + Info: "Specifies Guest OS Customization Settings", + Enabled: takeBoolPointer(true), + ComputerName: computername, + CustomizationScript: script, + ChangeSid: takeBoolPointer(changeSid), + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.Children.VM[0].HREF) + apiEndpoint.Path += "/guestCustomizationSection/" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeGuestCustomizationSection, "error customizing VM: %s", vu) +} + +func (vapp *VApp) GetStatus() (string, error) { + err := vapp.Refresh() + if err != nil { + return "", fmt.Errorf("error refreshing vApp: %s", err) + } + // Trying to make this function future-proof: + // If a new status is added to a future vCD API and the status map in types.go + // is not updated, we may get a panic. + // Using the ", ok" construct we take control of the data lookup and are able to fail + // gracefully. + statusText, ok := types.VAppStatuses[vapp.VApp.Status] + if ok { + return statusText, nil + } + return "", fmt.Errorf("status %d does not have a description in types.VappStatuses", vapp.VApp.Status) +} + +// BlockWhileStatus blocks until the status of vApp exits unwantedStatus. +// It sleeps 200 milliseconds between iterations and times out after timeOutAfterSeconds +// of seconds. 
+func (vapp *VApp) BlockWhileStatus(unwantedStatus string, timeOutAfterSeconds int) error { + timeoutAfter := time.After(time.Duration(timeOutAfterSeconds) * time.Second) + tick := time.NewTicker(200 * time.Millisecond) + + for { + select { + case <-timeoutAfter: + return fmt.Errorf("timed out waiting for vApp to exit state %s after %d seconds", + unwantedStatus, timeOutAfterSeconds) + case <-tick.C: + currentStatus, err := vapp.GetStatus() + + if err != nil { + return fmt.Errorf("could not get vApp status %s", err) + } + if currentStatus != unwantedStatus { + return nil + } + } + } +} + +func (vapp *VApp) GetNetworkConnectionSection() (*types.NetworkConnectionSection, error) { + + networkConnectionSection := &types.NetworkConnectionSection{} + + if vapp.VApp.Children.VM[0].HREF == "" { + return networkConnectionSection, fmt.Errorf("cannot refresh, Object is empty") + } + + _, err := vapp.client.ExecuteRequest(vapp.VApp.Children.VM[0].HREF+"/networkConnectionSection/", http.MethodGet, + types.MimeNetworkConnectionSection, "error retrieving network connection: %s", nil, networkConnectionSection) + + // The request was successful + return networkConnectionSection, err +} + +// Sets number of available virtual logical processors +// (i.e. CPUs x cores per socket) +// https://communities.vmware.com/thread/576209 +// Deprecated: Use vm.ChangeCPUcount() +func (vapp *VApp) ChangeCPUCount(virtualCpuCount int) (Task, error) { + return vapp.ChangeCPUCountWithCore(virtualCpuCount, nil) +} + +// Sets number of available virtual logical processors +// (i.e. CPUs x cores per socket) and cores per socket. +// Socket count is a result of: virtual logical processors/cores per socket +// https://communities.vmware.com/thread/576209 +// Deprecated: Use vm.ChangeCPUCountWithCore() +func (vapp *VApp) ChangeCPUCountWithCore(virtualCpuCount int, coresPerSocket *int) (Task, error) { + + err := vapp.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing vApp before running customization: %s", err) + } + + // Check if VApp Children is populated + if vapp.VApp.Children == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children, interrupting customization") + } + + newcpu := &types.OVFItem{ + XmlnsRasd: types.XMLNamespaceRASD, + XmlnsVCloud: types.XMLNamespaceVCloud, + XmlnsXsi: types.XMLNamespaceXSI, + XmlnsVmw: types.XMLNamespaceVMW, + VCloudHREF: vapp.VApp.Children.VM[0].HREF + "/virtualHardwareSection/cpu", + VCloudType: types.MimeRasdItem, + AllocationUnits: "hertz * 10^6", + Description: "Number of Virtual CPUs", + ElementName: strconv.Itoa(virtualCpuCount) + " virtual CPU(s)", + InstanceID: 4, + Reservation: 0, + ResourceType: types.ResourceTypeProcessor, + VirtualQuantity: int64(virtualCpuCount), + Weight: 0, + CoresPerSocket: coresPerSocket, + Link: &types.Link{ + HREF: vapp.VApp.Children.VM[0].HREF + "/virtualHardwareSection/cpu", + Rel: "edit", + Type: types.MimeRasdItem, + }, + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.Children.VM[0].HREF) + apiEndpoint.Path += "/virtualHardwareSection/cpu" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeRasdItem, "error changing CPU count: %s", newcpu) +} + +func (vapp *VApp) ChangeStorageProfile(name string) (Task, error) { + err := vapp.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing vApp before running customization: %s", err) + } + + if vapp.VApp.Children == nil || len(vapp.VApp.Children.VM) == 0 { + return Task{}, fmt.Errorf("vApp 
doesn't contain any children, interrupting customization") + } + + vdc, err := vapp.getParentVDC() + if err != nil { + return Task{}, fmt.Errorf("error retrieving parent VDC for vApp %s", vapp.VApp.Name) + } + storageProfileRef, err := vdc.FindStorageProfileReference(name) + if err != nil { + return Task{}, fmt.Errorf("error retrieving storage profile %s for vApp %s", name, vapp.VApp.Name) + } + + newProfile := &types.Vm{ + Name: vapp.VApp.Children.VM[0].Name, + StorageProfile: &storageProfileRef, + Xmlns: types.XMLNamespaceVCloud, + } + + // Return the task + return vapp.client.ExecuteTaskRequest(vapp.VApp.Children.VM[0].HREF, http.MethodPut, + types.MimeVM, "error changing CPU count: %s", newProfile) +} + +// Deprecated as it changes only first VM's name +func (vapp *VApp) ChangeVMName(name string) (Task, error) { + err := vapp.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing vApp before running customization: %s", err) + } + + if vapp.VApp.Children == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children, interrupting customization") + } + + newName := &types.Vm{ + Name: name, + Xmlns: types.XMLNamespaceVCloud, + } + + // Return the task + return vapp.client.ExecuteTaskRequest(vapp.VApp.Children.VM[0].HREF, http.MethodPut, + types.MimeVM, "error changing VM name: %s", newName) +} + +// SetOvf sets guest properties for the first child VM in vApp +// +// Deprecated: Use vm.SetProductSectionList() +func (vapp *VApp) SetOvf(parameters map[string]string) (Task, error) { + err := vapp.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing vApp before running customization: %s", err) + } + + if vapp.VApp.Children == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children, interrupting customization") + } + + if vapp.VApp.Children.VM[0].ProductSection == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children with ProductSection, interrupting customization") + } + + for key, value := range parameters { + for _, ovf_value := range vapp.VApp.Children.VM[0].ProductSection.Property { + if ovf_value.Key == key { + ovf_value.Value = &types.Value{Value: value} + break + } + } + } + + ovf := &types.ProductSectionList{ + Xmlns: types.XMLNamespaceVCloud, + Ovf: types.XMLNamespaceOVF, + ProductSection: vapp.VApp.Children.VM[0].ProductSection, + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.Children.VM[0].HREF) + apiEndpoint.Path += "/productSections" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeProductSection, "error setting ovf: %s", ovf) +} + +func (vapp *VApp) ChangeNetworkConfig(networks []map[string]interface{}, ip string) (Task, error) { + err := vapp.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing VM before running customization: %s", err) + } + + if vapp.VApp.Children == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children, interrupting customization") + } + + networksection, err := vapp.GetNetworkConnectionSection() + if err != nil { + return Task{}, err + } + + for index, network := range networks { + // Determine what type of address is requested for the vApp + ipAllocationMode := types.IPAllocationModeNone + ipAddress := "Any" + + // TODO: Review current behaviour of using DHCP when left blank + if ip == "" || ip == "dhcp" || network["ip"] == "dhcp" { + ipAllocationMode = types.IPAllocationModeDHCP + } else if ip == "allocated" || network["ip"] == "allocated" { + ipAllocationMode = 
types.IPAllocationModePool + } else if ip == "none" || network["ip"] == "none" { + ipAllocationMode = types.IPAllocationModeNone + } else if ip != "" || network["ip"] != "" { + ipAllocationMode = types.IPAllocationModeManual + // TODO: Check a valid IP has been given + ipAddress = ip + } + + util.Logger.Printf("[DEBUG] Function ChangeNetworkConfig() for %s invoked", network["orgnetwork"]) + + networksection.Xmlns = types.XMLNamespaceVCloud + networksection.Ovf = types.XMLNamespaceOVF + networksection.Info = "Specifies the available VM network connections" + + networksection.NetworkConnection[index].NeedsCustomization = true + networksection.NetworkConnection[index].IPAddress = ipAddress + networksection.NetworkConnection[index].IPAddressAllocationMode = ipAllocationMode + networksection.NetworkConnection[index].MACAddress = "" + + if network["is_primary"] == true { + networksection.PrimaryNetworkConnectionIndex = index + } + + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.Children.VM[0].HREF) + apiEndpoint.Path += "/networkConnectionSection/" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeNetworkConnectionSection, "error changing network config: %s", networksection) +} + +// Deprecated as it changes only first VM's memory +func (vapp *VApp) ChangeMemorySize(size int) (Task, error) { + + err := vapp.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing vApp before running customization: %s", err) + } + + // Check if VApp Children is populated + if vapp.VApp.Children == nil { + return Task{}, fmt.Errorf("vApp doesn't contain any children, interrupting customization") + } + + newMem := &types.OVFItem{ + XmlnsRasd: types.XMLNamespaceRASD, + XmlnsVCloud: types.XMLNamespaceVCloud, + XmlnsXsi: types.XMLNamespaceXSI, + VCloudHREF: vapp.VApp.Children.VM[0].HREF + "/virtualHardwareSection/memory", + VCloudType: types.MimeRasdItem, + AllocationUnits: "byte * 2^20", + Description: "Memory Size", + ElementName: strconv.Itoa(size) + " MB of memory", + InstanceID: 5, + Reservation: 0, + ResourceType: types.ResourceTypeMemory, + VirtualQuantity: int64(size), + Weight: 0, + Link: &types.Link{ + HREF: vapp.VApp.Children.VM[0].HREF + "/virtualHardwareSection/memory", + Rel: "edit", + Type: types.MimeRasdItem, + }, + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.Children.VM[0].HREF) + apiEndpoint.Path += "/virtualHardwareSection/memory" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeRasdItem, "error changing memory size: %s", newMem) +} + +func (vapp *VApp) GetNetworkConfig() (*types.NetworkConfigSection, error) { + + networkConfig := &types.NetworkConfigSection{} + + if vapp.VApp.HREF == "" { + return networkConfig, fmt.Errorf("cannot refresh, Object is empty") + } + + _, err := vapp.client.ExecuteRequest(vapp.VApp.HREF+"/networkConfigSection/", http.MethodGet, + types.MimeNetworkConfigSection, "error retrieving network config: %s", nil, networkConfig) + + // The request was successful + return networkConfig, err +} + +// AddRAWNetworkConfig adds existing VDC network to vApp +// Deprecated: in favor of vapp.AddOrgNetwork +func (vapp *VApp) AddRAWNetworkConfig(orgvdcnetworks []*types.OrgVDCNetwork) (Task, error) { + + vAppNetworkConfig, err := vapp.GetNetworkConfig() + if err != nil { + return Task{}, fmt.Errorf("error getting vApp networks: %s", err) + } + networkConfigurations := vAppNetworkConfig.NetworkConfig + + for _, network := range 
orgvdcnetworks { + networkConfigurations = append(networkConfigurations, + types.VAppNetworkConfiguration{ + NetworkName: network.Name, + Configuration: &types.NetworkConfiguration{ + ParentNetwork: &types.Reference{ + HREF: network.HREF, + }, + FenceMode: types.FenceModeBridged, + }, + }, + ) + } + + return updateNetworkConfigurations(vapp, networkConfigurations) +} + +// Function allows to create isolated network for vApp. This is equivalent to vCD UI function - vApp network creation. +// Deprecated: in favor of vapp.CreateVappNetwork +func (vapp *VApp) AddIsolatedNetwork(newIsolatedNetworkSettings *VappNetworkSettings) (Task, error) { + + err := validateNetworkConfigSettings(newIsolatedNetworkSettings) + if err != nil { + return Task{}, err + } + + // for case when range is one ip address + if newIsolatedNetworkSettings.DhcpSettings != nil && newIsolatedNetworkSettings.DhcpSettings.IPRange != nil && newIsolatedNetworkSettings.DhcpSettings.IPRange.EndAddress == "" { + newIsolatedNetworkSettings.DhcpSettings.IPRange.EndAddress = newIsolatedNetworkSettings.DhcpSettings.IPRange.StartAddress + } + + // only add values if available. Won't be send to API if not provided + var networkFeatures *types.NetworkFeatures + if newIsolatedNetworkSettings.DhcpSettings != nil { + networkFeatures = &types.NetworkFeatures{DhcpService: &types.DhcpService{ + IsEnabled: newIsolatedNetworkSettings.DhcpSettings.IsEnabled, + DefaultLeaseTime: newIsolatedNetworkSettings.DhcpSettings.DefaultLeaseTime, + MaxLeaseTime: newIsolatedNetworkSettings.DhcpSettings.MaxLeaseTime, + IPRange: newIsolatedNetworkSettings.DhcpSettings.IPRange}} + } + + networkConfigurations := vapp.VApp.NetworkConfigSection.NetworkConfig + networkConfigurations = append(networkConfigurations, + types.VAppNetworkConfiguration{ + NetworkName: newIsolatedNetworkSettings.Name, + Description: newIsolatedNetworkSettings.Description, + Configuration: &types.NetworkConfiguration{ + FenceMode: types.FenceModeIsolated, + GuestVlanAllowed: newIsolatedNetworkSettings.GuestVLANAllowed, + Features: networkFeatures, + IPScopes: &types.IPScopes{IPScope: []*types.IPScope{&types.IPScope{IsInherited: false, Gateway: newIsolatedNetworkSettings.Gateway, + Netmask: newIsolatedNetworkSettings.NetMask, DNS1: newIsolatedNetworkSettings.DNS1, + DNS2: newIsolatedNetworkSettings.DNS2, DNSSuffix: newIsolatedNetworkSettings.DNSSuffix, IsEnabled: true, + IPRanges: &types.IPRanges{IPRange: newIsolatedNetworkSettings.StaticIPRanges}}}}, + }, + IsDeployed: false, + }) + + return updateNetworkConfigurations(vapp, networkConfigurations) + +} + +// CreateVappNetwork creates isolated or nat routed(connected to Org VDC network) network for vApp. +// Returns pointer to types.NetworkConfigSection or error +// If orgNetwork is nil, then isolated network created. +func (vapp *VApp) CreateVappNetwork(newNetworkSettings *VappNetworkSettings, orgNetwork *types.OrgVDCNetwork) (*types.NetworkConfigSection, error) { + task, err := vapp.CreateVappNetworkAsync(newNetworkSettings, orgNetwork) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + vAppNetworkConfig, err := vapp.GetNetworkConfig() + if err != nil { + return nil, fmt.Errorf("error getting vApp networks: %#v", err) + } + + return vAppNetworkConfig, nil +} + +// CreateVappNetworkAsync creates asynchronously isolated or nat routed network for vApp. 
Returns Task or error +// If orgNetwork is nil, then isolated network created. +func (vapp *VApp) CreateVappNetworkAsync(newNetworkSettings *VappNetworkSettings, orgNetwork *types.OrgVDCNetwork) (Task, error) { + + err := validateNetworkConfigSettings(newNetworkSettings) + if err != nil { + return Task{}, err + } + + // for case when range is one ip address + if newNetworkSettings.DhcpSettings != nil && newNetworkSettings.DhcpSettings.IPRange != nil && newNetworkSettings.DhcpSettings.IPRange.EndAddress == "" { + newNetworkSettings.DhcpSettings.IPRange.EndAddress = newNetworkSettings.DhcpSettings.IPRange.StartAddress + } + + // only add values if available. Won't be send to API if not provided + var networkFeatures *types.NetworkFeatures + if newNetworkSettings.DhcpSettings != nil { + networkFeatures = &types.NetworkFeatures{DhcpService: &types.DhcpService{ + IsEnabled: newNetworkSettings.DhcpSettings.IsEnabled, + DefaultLeaseTime: newNetworkSettings.DhcpSettings.DefaultLeaseTime, + MaxLeaseTime: newNetworkSettings.DhcpSettings.MaxLeaseTime, + IPRange: newNetworkSettings.DhcpSettings.IPRange}, + } + } + + networkConfigurations := vapp.VApp.NetworkConfigSection.NetworkConfig + vappConfiguration := types.VAppNetworkConfiguration{ + NetworkName: newNetworkSettings.Name, + Description: newNetworkSettings.Description, + Configuration: &types.NetworkConfiguration{ + FenceMode: types.FenceModeIsolated, + GuestVlanAllowed: newNetworkSettings.GuestVLANAllowed, + Features: networkFeatures, + IPScopes: &types.IPScopes{IPScope: []*types.IPScope{&types.IPScope{IsInherited: false, Gateway: newNetworkSettings.Gateway, + Netmask: newNetworkSettings.NetMask, DNS1: newNetworkSettings.DNS1, + DNS2: newNetworkSettings.DNS2, DNSSuffix: newNetworkSettings.DNSSuffix, IsEnabled: true, + IPRanges: &types.IPRanges{IPRange: newNetworkSettings.StaticIPRanges}}}}, + RetainNetInfoAcrossDeployments: newNetworkSettings.RetainIpMacEnabled, + }, + IsDeployed: false, + } + if orgNetwork != nil { + vappConfiguration.Configuration.ParentNetwork = &types.Reference{ + HREF: orgNetwork.HREF, + } + vappConfiguration.Configuration.FenceMode = types.FenceModeNAT + } + + networkConfigurations = append(networkConfigurations, + vappConfiguration) + + return updateNetworkConfigurations(vapp, networkConfigurations) +} + +// AddOrgNetwork adds Org VDC network as vApp network. +// Returns pointer to types.NetworkConfigSection or error +func (vapp *VApp) AddOrgNetwork(newNetworkSettings *VappNetworkSettings, orgNetwork *types.OrgVDCNetwork, isFenced bool) (*types.NetworkConfigSection, error) { + task, err := vapp.AddOrgNetworkAsync(newNetworkSettings, orgNetwork, isFenced) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + vAppNetworkConfig, err := vapp.GetNetworkConfig() + if err != nil { + return nil, fmt.Errorf("error getting vApp networks: %#v", err) + } + + return vAppNetworkConfig, nil +} + +// AddOrgNetworkAsync adds asynchronously Org VDC network as vApp network. 
Returns Task or error +func (vapp *VApp) AddOrgNetworkAsync(newNetworkSettings *VappNetworkSettings, orgNetwork *types.OrgVDCNetwork, isFenced bool) (Task, error) { + + if orgNetwork == nil { + return Task{}, errors.New("org VDC network is missing") + } + + fenceMode := types.FenceModeBridged + if isFenced { + fenceMode = types.FenceModeNAT + } + + networkConfigurations := vapp.VApp.NetworkConfigSection.NetworkConfig + vappConfiguration := types.VAppNetworkConfiguration{ + NetworkName: orgNetwork.Name, + Configuration: &types.NetworkConfiguration{ + FenceMode: fenceMode, + ParentNetwork: &types.Reference{ + HREF: orgNetwork.HREF, + }, + RetainNetInfoAcrossDeployments: newNetworkSettings.RetainIpMacEnabled, + }, + IsDeployed: false, + } + networkConfigurations = append(networkConfigurations, + vappConfiguration) + + return updateNetworkConfigurations(vapp, networkConfigurations) + +} + +// UpdateNetwork updates vApp networks (isolated or connected to Org VDC network) +// Returns pointer to types.NetworkConfigSection or error +func (vapp *VApp) UpdateNetwork(newNetworkSettings *VappNetworkSettings, orgNetwork *types.OrgVDCNetwork) (*types.NetworkConfigSection, error) { + task, err := vapp.UpdateNetworkAsync(newNetworkSettings, orgNetwork) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + vAppNetworkConfig, err := vapp.GetNetworkConfig() + if err != nil { + return nil, fmt.Errorf("error getting vApp networks: %#v", err) + } + + return vAppNetworkConfig, nil +} + +// UpdateNetworkAsync asynchronously updates vApp networks (isolated or connected to Org VDC network). +// Returns task or error +func (vapp *VApp) UpdateNetworkAsync(networkSettingsToUpdate *VappNetworkSettings, orgNetwork *types.OrgVDCNetwork) (Task, error) { + util.Logger.Printf("[TRACE] UpdateNetworkAsync with values: %#v and connect to org network: %#v", networkSettingsToUpdate, orgNetwork) + currentNetworkConfiguration, err := vapp.GetNetworkConfig() + if err != nil { + return Task{}, err + } + var networkToUpdate types.VAppNetworkConfiguration + var networkToUpdateIndex int + for index, networkConfig := range currentNetworkConfiguration.NetworkConfig { + if networkConfig.Link != nil { + uuid, err := GetUuidFromHref(networkConfig.Link.HREF, false) + if err != nil { + return Task{}, err + } + if uuid == extractUuid(networkSettingsToUpdate.ID) { + networkToUpdate = networkConfig + networkToUpdateIndex = index + break + } + } + } + + if networkToUpdate == (types.VAppNetworkConfiguration{}) { + return Task{}, fmt.Errorf("not found network to update with Id %s", networkSettingsToUpdate.ID) + } + if networkToUpdate.Configuration == nil { + networkToUpdate.Configuration = &types.NetworkConfiguration{} + } + networkToUpdate.Configuration.RetainNetInfoAcrossDeployments = networkSettingsToUpdate.RetainIpMacEnabled + // new network to connect + if networkToUpdate.Configuration.ParentNetwork == nil && orgNetwork != nil { + networkToUpdate.Configuration.FenceMode = types.FenceModeNAT + networkToUpdate.Configuration.ParentNetwork = &types.Reference{HREF: orgNetwork.HREF} + } + // change network to connect + if networkToUpdate.Configuration.ParentNetwork != nil && orgNetwork != nil && networkToUpdate.Configuration.ParentNetwork.HREF != orgNetwork.HREF { + networkToUpdate.Configuration.ParentNetwork = &types.Reference{HREF: orgNetwork.HREF} + } + // remove network to connect + if orgNetwork == nil { + 
networkToUpdate.Configuration.FenceMode = types.FenceModeIsolated + networkToUpdate.Configuration.ParentNetwork = nil + } + networkToUpdate.Description = networkSettingsToUpdate.Description + networkToUpdate.NetworkName = networkSettingsToUpdate.Name + networkToUpdate.Configuration.GuestVlanAllowed = networkSettingsToUpdate.GuestVLANAllowed + networkToUpdate.Configuration.IPScopes.IPScope[0].Gateway = networkSettingsToUpdate.Gateway + networkToUpdate.Configuration.IPScopes.IPScope[0].Netmask = networkSettingsToUpdate.NetMask + networkToUpdate.Configuration.IPScopes.IPScope[0].DNS1 = networkSettingsToUpdate.DNS1 + networkToUpdate.Configuration.IPScopes.IPScope[0].DNS2 = networkSettingsToUpdate.DNS2 + networkToUpdate.Configuration.IPScopes.IPScope[0].DNSSuffix = networkSettingsToUpdate.DNSSuffix + networkToUpdate.Configuration.IPScopes.IPScope[0].IPRanges = &types.IPRanges{IPRange: networkSettingsToUpdate.StaticIPRanges} + + // for case when range is one ip address + if networkSettingsToUpdate.DhcpSettings != nil && networkSettingsToUpdate.DhcpSettings.IPRange != nil && networkSettingsToUpdate.DhcpSettings.IPRange.EndAddress == "" { + networkSettingsToUpdate.DhcpSettings.IPRange.EndAddress = networkSettingsToUpdate.DhcpSettings.IPRange.StartAddress + } + + // remove DHCP config + if networkSettingsToUpdate.DhcpSettings == nil { + networkToUpdate.Configuration.Features.DhcpService = nil + } + + // create DHCP config + if networkSettingsToUpdate.DhcpSettings != nil && networkToUpdate.Configuration.Features.DhcpService == nil { + networkToUpdate.Configuration.Features.DhcpService = &types.DhcpService{ + IsEnabled: networkSettingsToUpdate.DhcpSettings.IsEnabled, + DefaultLeaseTime: networkSettingsToUpdate.DhcpSettings.DefaultLeaseTime, + MaxLeaseTime: networkSettingsToUpdate.DhcpSettings.MaxLeaseTime, + IPRange: networkSettingsToUpdate.DhcpSettings.IPRange} + } + + // update DHCP config + if networkSettingsToUpdate.DhcpSettings != nil && networkToUpdate.Configuration.Features.DhcpService != nil { + networkToUpdate.Configuration.Features.DhcpService.IsEnabled = networkSettingsToUpdate.DhcpSettings.IsEnabled + networkToUpdate.Configuration.Features.DhcpService.DefaultLeaseTime = networkSettingsToUpdate.DhcpSettings.DefaultLeaseTime + networkToUpdate.Configuration.Features.DhcpService.MaxLeaseTime = networkSettingsToUpdate.DhcpSettings.MaxLeaseTime + networkToUpdate.Configuration.Features.DhcpService.IPRange = networkSettingsToUpdate.DhcpSettings.IPRange + } + + currentNetworkConfiguration.NetworkConfig[networkToUpdateIndex] = networkToUpdate + + return updateNetworkConfigurations(vapp, currentNetworkConfiguration.NetworkConfig) +} + +// UpdateOrgNetwork updates Org VDC network which is part of a vApp +// Returns pointer to types.NetworkConfigSection or error +func (vapp *VApp) UpdateOrgNetwork(newNetworkSettings *VappNetworkSettings, isFenced bool) (*types.NetworkConfigSection, error) { + task, err := vapp.UpdateOrgNetworkAsync(newNetworkSettings, isFenced) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + vAppNetworkConfig, err := vapp.GetNetworkConfig() + if err != nil { + return nil, fmt.Errorf("error getting vApp networks: %#v", err) + } + + return vAppNetworkConfig, nil +} + +// UpdateOrgNetworkAsync asynchronously updates Org VDC network which is part of a vApp +// Returns task or error +func (vapp *VApp) UpdateOrgNetworkAsync(networkSettingsToUpdate 
*VappNetworkSettings, isFenced bool) (Task, error) { + util.Logger.Printf("[TRACE] UpdateOrgNetworkAsync with values: %#v ", networkSettingsToUpdate) + currentNetworkConfiguration, err := vapp.GetNetworkConfig() + if err != nil { + return Task{}, err + } + var networkToUpdate types.VAppNetworkConfiguration + var networkToUpdateIndex int + + for index, networkConfig := range currentNetworkConfiguration.NetworkConfig { + if networkConfig.Link != nil { + uuid, err := GetUuidFromHref(networkConfig.Link.HREF, false) + if err != nil { + return Task{}, err + } + + if uuid == extractUuid(networkSettingsToUpdate.ID) { + networkToUpdate = networkConfig + networkToUpdateIndex = index + break + } + } + } + + if networkToUpdate == (types.VAppNetworkConfiguration{}) { + return Task{}, fmt.Errorf("not found network to update with Id %s", networkSettingsToUpdate.ID) + } + + fenceMode := types.FenceModeBridged + if isFenced { + fenceMode = types.FenceModeNAT + } + + if networkToUpdate.Configuration == nil { + networkToUpdate.Configuration = &types.NetworkConfiguration{} + } + networkToUpdate.Configuration.RetainNetInfoAcrossDeployments = networkSettingsToUpdate.RetainIpMacEnabled + networkToUpdate.Configuration.FenceMode = fenceMode + + currentNetworkConfiguration.NetworkConfig[networkToUpdateIndex] = networkToUpdate + + return updateNetworkConfigurations(vapp, currentNetworkConfiguration.NetworkConfig) +} + +func validateNetworkConfigSettings(networkSettings *VappNetworkSettings) error { + if networkSettings.Name == "" { + return errors.New("network name is missing") + } + + if networkSettings.Gateway == "" { + return errors.New("network gateway IP is missing") + } + + if networkSettings.NetMask == "" { + return errors.New("network mask config is missing") + } + + if networkSettings.NetMask == "" { + return errors.New("network mask config is missing") + } + + if networkSettings.DhcpSettings != nil && networkSettings.DhcpSettings.IPRange == nil { + return errors.New("network DHCP ip range config is missing") + } + + if networkSettings.DhcpSettings != nil && networkSettings.DhcpSettings.IPRange.StartAddress == "" { + return errors.New("network DHCP ip range start address is missing") + } + + return nil +} + +// RemoveNetwork removes any network (be it isolated or connected to an Org Network) from vApp +// Returns pointer to types.NetworkConfigSection or error +func (vapp *VApp) RemoveNetwork(identifier string) (*types.NetworkConfigSection, error) { + task, err := vapp.RemoveNetworkAsync(identifier) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + vAppNetworkConfig, err := vapp.GetNetworkConfig() + if err != nil { + return nil, fmt.Errorf("error getting vApp networks: %#v", err) + } + + return vAppNetworkConfig, nil +} + +// RemoveNetworkAsync asynchronously removes any network (be it isolated or connected to an Org Network) from vApp +// Accepts network ID or name +func (vapp *VApp) RemoveNetworkAsync(identifier string) (Task, error) { + + if identifier == "" { + return Task{}, fmt.Errorf("network ID/name can't be empty") + } + + networkConfigurations := vapp.VApp.NetworkConfigSection.NetworkConfig + for _, networkConfig := range networkConfigurations { + networkId, err := GetUuidFromHref(networkConfig.Link.HREF, false) + if err != nil { + return Task{}, fmt.Errorf("unable to get network ID from HREF: %s", err) + } + if networkId == extractUuid(identifier) || networkConfig.NetworkName 
== identifier { + deleteUrl := vapp.client.VCDHREF.String() + "/network/" + networkId + errMessage := fmt.Sprintf("detaching vApp network %s (id '%s'): %%s", networkConfig.NetworkName, networkId) + task, err := vapp.client.ExecuteTaskRequest(deleteUrl, http.MethodDelete, types.AnyXMLMime, errMessage, nil) + if err != nil { + return Task{}, err + } + + return task, nil + } + } + + return Task{}, fmt.Errorf("network to remove %s, wasn't found", identifier) + +} + +// Removes vApp isolated network +// Deprecated: in favor vapp.RemoveNetwork +func (vapp *VApp) RemoveIsolatedNetwork(networkName string) (Task, error) { + + if networkName == "" { + return Task{}, fmt.Errorf("network name can't be empty") + } + + networkConfigurations := vapp.VApp.NetworkConfigSection.NetworkConfig + isNetworkFound := false + for index, networkConfig := range networkConfigurations { + if networkConfig.NetworkName == networkName { + isNetworkFound = true + networkConfigurations = append(networkConfigurations[:index], networkConfigurations[index+1:]...) + } + } + + if !isNetworkFound { + return Task{}, fmt.Errorf("network to remove %s, wasn't found", networkName) + } + + return updateNetworkConfigurations(vapp, networkConfigurations) +} + +// Function allows to update vApp network configuration. This works for updating, deleting and adding. +// Network configuration has to be full with new, changed elements and unchanged. +// https://opengrok.eng.vmware.com/source/xref/cloud-sp-main.perforce-shark.1700/sp-main/dev-integration/system-tests/SystemTests/src/main/java/com/vmware/cloud/systemtests/util/VAppNetworkUtils.java#createVAppNetwork +// http://pubs.vmware.com/vcloud-api-1-5/wwhelp/wwhimpl/js/html/wwhelp.htm#href=api_prog/GUID-92622A15-E588-4FA1-92DA-A22A4757F2A0.html#1_14_12_10_1 +func updateNetworkConfigurations(vapp *VApp, networkConfigurations []types.VAppNetworkConfiguration) (Task, error) { + util.Logger.Printf("[TRACE] updateNetworkConfigurations for vAPP: %#v and network config: %#v", vapp, networkConfigurations) + networkConfig := &types.NetworkConfigSection{ + Info: "Configuration parameters for logical networks", + Ovf: types.XMLNamespaceOVF, + Type: types.MimeNetworkConfigSection, + Xmlns: types.XMLNamespaceVCloud, + NetworkConfig: networkConfigurations, + } + + apiEndpoint := urlParseRequestURI(vapp.VApp.HREF) + apiEndpoint.Path += "/networkConfigSection/" + + // Return the task + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeNetworkConfigSection, "error updating vApp Network: %s", networkConfig) +} + +// RemoveAllNetworks detaches all networks from vApp +func (vapp *VApp) RemoveAllNetworks() (Task, error) { + return updateNetworkConfigurations(vapp, []types.VAppNetworkConfiguration{}) +} + +// SetProductSectionList sets product section for a vApp. It allows to change vApp guest properties. +// +// The slice of properties "ProductSectionList.ProductSection.Property" is not necessarily ordered +// or returned as set before +func (vapp *VApp) SetProductSectionList(productSection *types.ProductSectionList) (*types.ProductSectionList, error) { + err := setProductSectionList(vapp.client, vapp.VApp.HREF, productSection) + if err != nil { + return nil, fmt.Errorf("unable to set vApp product section: %s", err) + } + + return vapp.GetProductSectionList() +} + +// GetProductSectionList retrieves product section for a vApp. It allows to read vApp guest properties. 
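+//
+// Illustrative sketch (not upstream documentation; it assumes the vApp reference was
+// obtained elsewhere and only reads the returned properties):
+//
+//	ps, err := vapp.GetProductSectionList()
+//	if err != nil {
+//		return err
+//	}
+//	if ps != nil && ps.ProductSection != nil {
+//		for _, property := range ps.ProductSection.Property {
+//			fmt.Printf("guest property: %s\n", property.Key)
+//		}
+//	}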
+// +// The slice of properties "ProductSectionList.ProductSection.Property" is not necessarily ordered +// or returned as set before +func (vapp *VApp) GetProductSectionList() (*types.ProductSectionList, error) { + return getProductSectionList(vapp.client, vapp.VApp.HREF) +} + +// GetVMByName returns a VM reference if the VM name matches an existing one. +// If no valid VM is found, it returns a nil VM reference and an error +func (vapp *VApp) GetVMByName(vmName string, refresh bool) (*VM, error) { + if refresh { + err := vapp.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing vApp: %s", err) + } + } + + //vApp Might Not Have Any VMs + if vapp.VApp.Children == nil { + return nil, ErrorEntityNotFound + } + + util.Logger.Printf("[TRACE] Looking for VM: %s", vmName) + for _, child := range vapp.VApp.Children.VM { + + util.Logger.Printf("[TRACE] Looking at: %s", child.Name) + if child.Name == vmName { + return vapp.client.GetVMByHref(child.HREF) + } + + } + util.Logger.Printf("[TRACE] Couldn't find VM: %s", vmName) + return nil, ErrorEntityNotFound +} + +// GetVMById returns a VM reference if the VM ID matches an existing one. +// If no valid VM is found, it returns a nil VM reference and an error +func (vapp *VApp) GetVMById(id string, refresh bool) (*VM, error) { + if refresh { + err := vapp.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing vApp: %s", err) + } + } + + //vApp Might Not Have Any VMs + if vapp.VApp.Children == nil { + return nil, ErrorEntityNotFound + } + + util.Logger.Printf("[TRACE] Looking for VM: %s", id) + for _, child := range vapp.VApp.Children.VM { + + util.Logger.Printf("[TRACE] Looking at: %s", child.Name) + if equalIds(id, child.ID, child.HREF) { + return vapp.client.GetVMByHref(child.HREF) + } + } + util.Logger.Printf("[TRACE] Couldn't find VM: %s", id) + return nil, ErrorEntityNotFound +} + +// GetVMByNameOrId returns a VM reference if either the VM name or ID matches an existing one. 
+// If no valid VM is found, it returns a nil VM reference and an error +func (vapp *VApp) GetVMByNameOrId(identifier string, refresh bool) (*VM, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return vapp.GetVMByName(name, refresh) } + getById := func(id string, refresh bool) (interface{}, error) { return vapp.GetVMById(id, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, false) + if entity == nil { + return nil, err + } + return entity.(*VM), err +} + +// QueryVappList returns a list of all vApps in all the organizations available to the caller +func (client *Client) QueryVappList() ([]*types.QueryResultVAppRecordType, error) { + var vappList []*types.QueryResultVAppRecordType + queryType := client.GetQueryType(types.QtVapp) + params := map[string]string{ + "type": queryType, + "filterEncoded": "true", + } + vappResult, err := client.cumulativeQuery(queryType, nil, params) + if err != nil { + return nil, fmt.Errorf("error getting vApp list : %s", err) + } + vappList = vappResult.Results.VAppRecord + if client.IsSysAdmin { + vappList = vappResult.Results.AdminVAppRecord + } + return vappList, nil +} + +// getOrgInfo finds the organization to which the vApp belongs (through the VDC), and returns its name and ID +func (vapp *VApp) getOrgInfo() (*TenantContext, error) { + previous, exists := orgInfoCache[vapp.VApp.ID] + if exists { + return previous, nil + } + var err error + vdc, err := vapp.getParentVDC() + if err != nil { + return nil, err + } + return vdc.getTenantContext() +} + +// UpdateNameDescription can change the name and the description of a vApp +// If name is empty, it is left unchanged. +func (vapp *VApp) UpdateNameDescription(newName, newDescription string) error { + if vapp == nil || vapp.VApp.HREF == "" { + return fmt.Errorf("vApp or href cannot be empty") + } + + // Skip update if we are using the original values + if (newName == vapp.VApp.Name || newName == "") && (newDescription == vapp.VApp.Description) { + return nil + } + + opType := types.MimeRecomposeVappParams + + href := "" + for _, link := range vapp.VApp.Link { + if link.Type == opType && link.Rel == "recompose" { + href = link.HREF + break + } + } + + if href == "" { + return fmt.Errorf("no appropriate link for update found for vApp %s", vapp.VApp.Name) + } + + if newName == "" { + newName = vapp.VApp.Name + } + + recomposeParams := &types.SmallRecomposeVappParams{ + XMLName: xml.Name{}, + Ovf: types.XMLNamespaceOVF, + Xsi: types.XMLNamespaceXSI, + Xmlns: types.XMLNamespaceVCloud, + Name: newName, + Description: newDescription, + Deploy: vapp.VApp.Deployed, + } + + task, err := vapp.client.ExecuteTaskRequest(href, http.MethodPost, + opType, "error updating vapp: %s", recomposeParams) + + if err != nil { + return fmt.Errorf("unable to update vApp: %s", err) + } + + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("task for updating vApp failed: %s", err) + } + return vapp.Refresh() +} + +// UpdateDescription changes the description of a vApp +func (vapp *VApp) UpdateDescription(newDescription string) error { + return vapp.UpdateNameDescription("", newDescription) +} + +// Rename changes the name of a vApp +func (vapp *VApp) Rename(newName string) error { + return vapp.UpdateNameDescription(newName, vapp.VApp.Description) +} + +func (vapp *VApp) getTenantContext() (*TenantContext, error) { + parentVdc, err := vapp.getParentVDC() + if err != nil { + return nil, err + } + return parentVdc.getTenantContext() +} + +// RenewLease updates 
the lease terms for the vApp +func (vapp *VApp) RenewLease(deploymentLeaseInSeconds, storageLeaseInSeconds int) error { + + href := "" + if vapp.VApp.LeaseSettingsSection != nil { + if vapp.VApp.LeaseSettingsSection.DeploymentLeaseInSeconds == deploymentLeaseInSeconds && + vapp.VApp.LeaseSettingsSection.StorageLeaseInSeconds == storageLeaseInSeconds { + // Requested parameters are the same as existing parameters: exit without updating + return nil + } + href = vapp.VApp.LeaseSettingsSection.HREF + } + if href == "" { + for _, link := range vapp.VApp.Link { + if link.Rel == "edit" && link.Type == types.MimeLeaseSettingSection { + href = link.HREF + break + } + } + } + if href == "" { + return fmt.Errorf("link to update lease sttings not found for vApp %s", vapp.VApp.Name) + } + + var leaseSettings = types.UpdateLeaseSettingsSection{ + HREF: href, + XmlnsOvf: types.XMLNamespaceOVF, + Xmlns: types.XMLNamespaceVCloud, + OVFInfo: "Lease section settings", + Type: types.MimeLeaseSettingSection, + DeploymentLeaseInSeconds: takeIntAddress(deploymentLeaseInSeconds), + StorageLeaseInSeconds: takeIntAddress(storageLeaseInSeconds), + } + + task, err := vapp.client.ExecuteTaskRequest(href, http.MethodPut, + types.MimeLeaseSettingSection, "error updating vapp lease : %s", &leaseSettings) + + if err != nil { + return fmt.Errorf("unable to update vApp lease: %s", err) + } + + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("task for updating vApp lease failed: %s", err) + } + return vapp.Refresh() +} + +// GetLease retrieves the lease terms for a vApp +func (vapp *VApp) GetLease() (*types.LeaseSettingsSection, error) { + + href := "" + if vapp.VApp.LeaseSettingsSection != nil { + href = vapp.VApp.LeaseSettingsSection.HREF + } + if href == "" { + for _, link := range vapp.VApp.Link { + if link.Type == types.MimeLeaseSettingSection { + href = link.HREF + break + } + } + } + if href == "" { + return nil, fmt.Errorf("link to retrieve lease settings not found for vApp %s", vapp.VApp.Name) + } + var leaseSettings types.LeaseSettingsSection + + _, err := vapp.client.ExecuteRequest(href, http.MethodGet, "", "error getting vApp lease info: %s", nil, &leaseSettings) + + if err != nil { + return nil, err + } + return &leaseSettings, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vapp_network.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vapp_network.go new file mode 100644 index 000000000..5f3acb6cc --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vapp_network.go @@ -0,0 +1,302 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" + "net/http" +) + +// UpdateNetworkFirewallRules updates vApp networks firewall rules. It will overwrite existing ones as there is +// no 100% way to identify them separately. 
+// Returns pointer to types.VAppNetwork or error +func (vapp *VApp) UpdateNetworkFirewallRules(networkId string, firewallRules []*types.FirewallRule, enabled bool, defaultAction string, logDefaultAction bool) (*types.VAppNetwork, error) { + task, err := vapp.UpdateNetworkFirewallRulesAsync(networkId, firewallRules, enabled, defaultAction, logDefaultAction) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + return vapp.GetVappNetworkById(networkId, false) +} + +// UpdateNetworkFirewallRulesAsync asynchronously updates vApp networks firewall rules. It will overwrite existing ones +// as there is no 100% way to identify them separately. +// Returns task or error +func (vapp *VApp) UpdateNetworkFirewallRulesAsync(networkId string, firewallRules []*types.FirewallRule, enabled bool, defaultAction string, logDefaultAction bool) (Task, error) { + util.Logger.Printf("[TRACE] UpdateNetworkFirewallRulesAsync with values: id: %s and firewallServiceConfiguration: %#v", networkId, firewallRules) + uuid := extractUuid(networkId) + networkToUpdate, err := vapp.GetVappNetworkById(uuid, true) + if err != nil { + return Task{}, err + } + + if networkToUpdate.Configuration.Features == nil { + networkToUpdate.Configuration.Features = &types.NetworkFeatures{} + } + networkToUpdate.Xmlns = types.XMLNamespaceVCloud + + // If API didn't return Firewall service XML part, that means vApp network isn't connected to org network or not fenced. + // In other words there isn't firewall when you connected directly or isolated. + if networkToUpdate.Configuration.Features.FirewallService == nil { + return Task{}, fmt.Errorf("provided network isn't connecd to org network or isn't fenced") + } + networkToUpdate.Configuration.Features.FirewallService.IsEnabled = enabled + networkToUpdate.Configuration.Features.FirewallService.LogDefaultAction = logDefaultAction + networkToUpdate.Configuration.Features.FirewallService.DefaultAction = defaultAction + networkToUpdate.Configuration.Features.FirewallService.FirewallRule = firewallRules + + // here we use `PUT /network/{id}` which allow to change vApp network. + // But `GET /network/{id}` can return org VDC network or vApp network. + apiEndpoint := vapp.client.VCDHREF + apiEndpoint.Path += "/network/" + uuid + + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeVappNetwork, "error updating vApp Network firewall rules: %s", networkToUpdate) +} + +// GetVappNetworkById returns a VApp network reference if the vApp network ID matches an existing one. +// If no valid VApp network is found, it returns a nil VApp network reference and an error +func (vapp *VApp) GetVappNetworkById(id string, refresh bool) (*types.VAppNetwork, error) { + util.Logger.Printf("[TRACE] [GetVappNetworkById] getting vApp Network: %s and refresh %t", id, refresh) + + if refresh { + err := vapp.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing vApp: %s", err) + } + } + + //vApp Might Not Have Any networks + if vapp.VApp.NetworkConfigSection == nil || len(vapp.VApp.NetworkConfigSection.NetworkConfig) == 0 { + return nil, ErrorEntityNotFound + } + + util.Logger.Printf("[TRACE] Looking for networks: %s --- %d", id, len(vapp.VApp.NetworkConfigSection.NetworkConfig)) + for _, vappNetwork := range vapp.VApp.NetworkConfigSection.NetworkConfig { + // Break early for empty network interfaces. 
They don't have all information + if vappNetwork.NetworkName == types.NoneNetwork { + continue + } + util.Logger.Printf("[TRACE] Looking at: %s", vappNetwork.Link.HREF) + if equalIds(id, vappNetwork.ID, vappNetwork.Link.HREF) { + vappNetwork := &types.VAppNetwork{} + + apiEndpoint := vapp.client.VCDHREF + apiEndpoint.Path += "/network/" + extractUuid(id) + + _, err := vapp.client.ExecuteRequest(apiEndpoint.String(), http.MethodGet, + types.MimeVappNetwork, "error getting vApp network: %s", nil, vappNetwork) + if err != nil { + return nil, err + } + return vappNetwork, nil + } + } + util.Logger.Printf("[TRACE] GetVappNetworkById returns not found entity") + return nil, ErrorEntityNotFound +} + +// GetVappNetworkByName returns a VAppNetwork reference if the vApp network name matches an existing one. +// If no valid vApp network is found, it returns a nil VAppNetwork reference and an error +func (vapp *VApp) GetVappNetworkByName(vappNetworkName string, refresh bool) (*types.VAppNetwork, error) { + util.Logger.Printf("[TRACE] [GetVappNetworkByName] getting vApp Network: %s and refresh %t", vappNetworkName, refresh) + if refresh { + err := vapp.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing vApp: %s", err) + } + } + + //vApp Might Not Have Any networks + if vapp.VApp.NetworkConfigSection == nil || len(vapp.VApp.NetworkConfigSection.NetworkConfig) == 0 { + return nil, ErrorEntityNotFound + } + + util.Logger.Printf("[TRACE] Looking for networks: %s", vappNetworkName) + for _, vappNetwork := range vapp.VApp.NetworkConfigSection.NetworkConfig { + + util.Logger.Printf("[TRACE] Looking at: %s", vappNetwork.NetworkName) + if vappNetwork.NetworkName == vappNetworkName { + return vapp.GetVappNetworkById(extractUuid(vappNetwork.Link.HREF), refresh) + } + + } + util.Logger.Printf("[TRACE] Couldn't find vApp network: %s", vappNetworkName) + return nil, ErrorEntityNotFound +} + +// GetVappNetworkByNameOrId returns a types.VAppNetwork reference if either the vApp network name or ID matches an existing one. +// If no valid vApp network is found, it returns a nil types.VAppNetwork reference and an error +func (vapp *VApp) GetVappNetworkByNameOrId(identifier string, refresh bool) (*types.VAppNetwork, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return vapp.GetVappNetworkByName(name, refresh) } + getById := func(id string, refresh bool) (interface{}, error) { return vapp.GetVappNetworkById(id, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, false) + if entity == nil { + return nil, err + } + return entity.(*types.VAppNetwork), err +} + +// UpdateNetworkNatRules updates vApp networks NAT rules. +// Returns pointer to types.VAppNetwork or error +func (vapp *VApp) UpdateNetworkNatRules(networkId string, natRules []*types.NatRule, enabled bool, natType, policy string) (*types.VAppNetwork, error) { + task, err := vapp.UpdateNetworkNatRulesAsync(networkId, natRules, enabled, natType, policy) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + return vapp.GetVappNetworkById(networkId, false) +} + +// UpdateNetworkNatRulesAsync asynchronously updates vApp NAT rules. 
+// Returns task or error +func (vapp *VApp) UpdateNetworkNatRulesAsync(networkId string, natRules []*types.NatRule, enabled bool, natType, policy string) (Task, error) { + util.Logger.Printf("[TRACE] UpdateNetworkNatRulesAsync with values: id: %s and natRules: %#v", networkId, natRules) + + uuid := extractUuid(networkId) + networkToUpdate, err := vapp.GetVappNetworkById(uuid, true) + if err != nil { + return Task{}, err + } + + if networkToUpdate.Configuration.Features == nil { + networkToUpdate.Configuration.Features = &types.NetworkFeatures{} + } + networkToUpdate.Xmlns = types.XMLNamespaceVCloud + + // if services are empty return by API, then we can deduce that network isn't connected to Org network or fenced + if networkToUpdate.Configuration.Features.NatService == nil && networkToUpdate.Configuration.Features.FirewallService == nil { + return Task{}, fmt.Errorf("provided network isn't connected to org network or isn't fenced") + } + if networkToUpdate.Configuration.Features.NatService == nil { + networkToUpdate.Configuration.Features.NatService = &types.NatService{} + } + networkToUpdate.Configuration.Features.NatService.IsEnabled = enabled + networkToUpdate.Configuration.Features.NatService.NatType = natType + networkToUpdate.Configuration.Features.NatService.Policy = policy + networkToUpdate.Configuration.Features.NatService.NatRule = natRules + + // here we use `PUT /network/{id}` which allow to change vApp network. + // But `GET /network/{id}` can return org VDC network or vApp network. + apiEndpoint := vapp.client.VCDHREF + apiEndpoint.Path += "/network/" + uuid + + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeVappNetwork, "error updating vApp Network NAT rules: %s", networkToUpdate) +} + +// RemoveAllNetworkNatRules removes all NAT rules from a vApp network +// Returns error +func (vapp *VApp) RemoveAllNetworkNatRules(networkId string) error { + task, err := vapp.UpdateNetworkNatRulesAsync(networkId, []*types.NatRule{}, false, "ipTranslation", "allowTraffic") + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + return nil +} + +// RemoveAllNetworkFirewallRules removes all network firewall rules from a vApp network. +// Returns error +func (vapp *VApp) RemoveAllNetworkFirewallRules(networkId string) error { + networkToUpdate, err := vapp.GetVappNetworkById(networkId, true) + if err != nil { + return err + } + task, err := vapp.UpdateNetworkFirewallRulesAsync(networkId, []*types.FirewallRule{}, false, + networkToUpdate.Configuration.Features.FirewallService.DefaultAction, networkToUpdate.Configuration.Features.FirewallService.LogDefaultAction) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + return nil +} + +// UpdateNetworkStaticRouting updates vApp network static routes. 
+// Returns pointer to types.VAppNetwork or error +func (vapp *VApp) UpdateNetworkStaticRouting(networkId string, staticRoutes []*types.StaticRoute, enabled bool) (*types.VAppNetwork, error) { + task, err := vapp.UpdateNetworkStaticRoutingAsync(networkId, staticRoutes, enabled) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + + return vapp.GetVappNetworkById(networkId, false) +} + +// UpdateNetworkStaticRoutingAsync asynchronously updates vApp network static routes. +// Returns task or error +func (vapp *VApp) UpdateNetworkStaticRoutingAsync(networkId string, staticRoutes []*types.StaticRoute, enabled bool) (Task, error) { + util.Logger.Printf("[TRACE] UpdateNetworkStaticRoutingAsync with values: id: %s and staticRoutes: %#v, enable: %t", networkId, staticRoutes, enabled) + + uuid := extractUuid(networkId) + networkToUpdate, err := vapp.GetVappNetworkById(uuid, true) + if err != nil { + return Task{}, err + } + + if !IsVappNetwork(networkToUpdate.Configuration) { + return Task{}, fmt.Errorf("network static routing can be applied only for vapp network, not vapp org network") + } + + if networkToUpdate.Configuration.Features == nil { + networkToUpdate.Configuration.Features = &types.NetworkFeatures{} + } + networkToUpdate.Xmlns = types.XMLNamespaceVCloud + + networkToUpdate.Configuration.Features.StaticRoutingService = &types.StaticRoutingService{IsEnabled: enabled, StaticRoute: staticRoutes} + + // here we use `PUT /network/{id}` which allow to change vApp network. + // But `GET /network/{id}` can return org VDC network or vApp network. + apiEndpoint := vapp.client.VCDHREF + apiEndpoint.Path += "/network/" + uuid + + return vapp.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeVappNetwork, "error updating vApp Network static routes: %s", networkToUpdate) +} + +// IsVappNetwork allows to identify if given network config is a vApp network and not a vApp Org network +func IsVappNetwork(networkConfig *types.NetworkConfiguration) bool { + if networkConfig.FenceMode == types.FenceModeIsolated || + (networkConfig.FenceMode == types.FenceModeNAT && networkConfig.IPScopes != nil && + networkConfig.IPScopes.IPScope != nil && len(networkConfig.IPScopes.IPScope) > 0 && + !networkConfig.IPScopes.IPScope[0].IsInherited) { + return true + } + return false +} + +// RemoveAllNetworkStaticRoutes removes all static routes from a vApp network +// Returns error +func (vapp *VApp) RemoveAllNetworkStaticRoutes(networkId string) error { + task, err := vapp.UpdateNetworkStaticRoutingAsync(networkId, []*types.StaticRoute{}, false) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("%s", combinedTaskErrorMessage(task.Task, err)) + } + return nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vapptemplate.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vapptemplate.go new file mode 100644 index 000000000..be2f2d02a --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vapptemplate.go @@ -0,0 +1,71 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +type VAppTemplate struct { + VAppTemplate *types.VAppTemplate + client *Client +} + +func NewVAppTemplate(cli *Client) *VAppTemplate { + return &VAppTemplate{ + VAppTemplate: new(types.VAppTemplate), + client: cli, + } +} + +func (vdc *Vdc) InstantiateVAppTemplate(template *types.InstantiateVAppTemplateParams) error { + vdcHref, err := url.ParseRequestURI(vdc.Vdc.HREF) + if err != nil { + return fmt.Errorf("error getting vdc href: %s", err) + } + vdcHref.Path += "/action/instantiateVAppTemplate" + + vapptemplate := NewVAppTemplate(vdc.client) + + _, err = vdc.client.ExecuteRequest(vdcHref.String(), http.MethodPut, + types.MimeInstantiateVappTemplateParams, "error instantiating a new template: %s", template, vapptemplate) + if err != nil { + return err + } + + task := NewTask(vdc.client) + for _, taskItem := range vapptemplate.VAppTemplate.Tasks.Task { + task.Task = taskItem + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error performing task: %s", err) + } + } + return nil +} + +// Refresh refreshes the vApp template item information by href +func (vAppTemplate *VAppTemplate) Refresh() error { + + if vAppTemplate.VAppTemplate == nil { + return fmt.Errorf("cannot refresh, Object is empty") + } + + url := vAppTemplate.VAppTemplate.HREF + if url == "nil" { + return fmt.Errorf("cannot refresh, HREF is empty") + } + + vAppTemplate.VAppTemplate = &types.VAppTemplate{} + + _, err := vAppTemplate.client.ExecuteRequest(url, http.MethodGet, + "", "error retrieving vApp template item: %s", nil, vAppTemplate.VAppTemplate) + + return err +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vdc.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vdc.go new file mode 100644 index 000000000..98613da6f --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vdc.go @@ -0,0 +1,1234 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type Vdc struct { + Vdc *types.Vdc + client *Client + parent organization +} + +func NewVdc(cli *Client) *Vdc { + return &Vdc{ + Vdc: new(types.Vdc), + client: cli, + } +} + +// Gets a vapp with a specific url vappHREF +func (vdc *Vdc) getVdcVAppbyHREF(vappHREF *url.URL) (*VApp, error) { + vapp := NewVApp(vdc.client) + + _, err := vdc.client.ExecuteRequest(vappHREF.String(), http.MethodGet, + "", "error retrieving VApp: %s", nil, vapp.VApp) + + return vapp, err +} + +// Undeploys every vapp in the vdc +func (vdc *Vdc) undeployAllVdcVApps() error { + err := vdc.Refresh() + if err != nil { + return fmt.Errorf("error refreshing vdc: %s", err) + } + for _, resents := range vdc.Vdc.ResourceEntities { + for _, resent := range resents.ResourceEntity { + if resent.Type == "application/vnd.vmware.vcloud.vApp+xml" { + vappHREF, err := url.Parse(resent.HREF) + if err != nil { + return err + } + vapp, err := vdc.getVdcVAppbyHREF(vappHREF) + if err != nil { + return fmt.Errorf("error retrieving vapp with url: %s and with error %s", vappHREF.Path, err) + } + task, err := vapp.Undeploy() + if err != nil { + return err + } + if task == (Task{}) { + continue + } + err = task.WaitTaskCompletion() + if err != nil { + return err + } + } + } + } + return nil +} + +// Removes all vapps in the vdc +func (vdc *Vdc) removeAllVdcVApps() error { + err := vdc.Refresh() + if err != nil { + return fmt.Errorf("error refreshing vdc: %s", err) + } + for _, resents := range vdc.Vdc.ResourceEntities { + for _, resent := range resents.ResourceEntity { + if resent.Type == "application/vnd.vmware.vcloud.vApp+xml" { + vappHREF, err := url.Parse(resent.HREF) + if err != nil { + return err + } + vapp, err := vdc.getVdcVAppbyHREF(vappHREF) + if err != nil { + return fmt.Errorf("error retrieving vapp with url: %s and with error %s", vappHREF.Path, err) + } + task, err := vapp.Delete() + if err != nil { + return fmt.Errorf("error deleting vapp: %s", err) + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("couldn't finish removing vapp %s", err) + } + } + } + } + return nil +} + +func (vdc *Vdc) Refresh() error { + + if vdc.Vdc.HREF == "" { + return fmt.Errorf("cannot refresh, Object is empty") + } + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. + unmarshalledVdc := &types.Vdc{} + + _, err := vdc.client.ExecuteRequest(vdc.Vdc.HREF, http.MethodGet, + "", "error refreshing vDC: %s", nil, unmarshalledVdc) + if err != nil { + return err + } + + vdc.Vdc = unmarshalledVdc + + // The request was successful + return nil +} + +// Deletes the vdc, returning an error of the vCD call fails. 
+// API Documentation: https://code.vmware.com/apis/220/vcloud#/doc/doc/operations/DELETE-Vdc.html +func (vdc *Vdc) Delete(force bool, recursive bool) (Task, error) { + util.Logger.Printf("[TRACE] Vdc.Delete - deleting VDC with force: %t, recursive: %t", force, recursive) + + if vdc.Vdc.HREF == "" { + return Task{}, fmt.Errorf("cannot delete, Object is empty") + } + + vdcUrl, err := url.ParseRequestURI(vdc.Vdc.HREF) + if err != nil { + return Task{}, fmt.Errorf("error parsing vdc url: %s", err) + } + + req := vdc.client.NewRequest(map[string]string{ + "force": strconv.FormatBool(force), + "recursive": strconv.FormatBool(recursive), + }, http.MethodDelete, *vdcUrl, nil) + resp, err := checkResp(vdc.client.Http.Do(req)) + if err != nil { + return Task{}, fmt.Errorf("error deleting vdc: %s", err) + } + task := NewTask(vdc.client) + if err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil { + return Task{}, fmt.Errorf("error decoding task response: %s", err) + } + if task.Task.Status == "error" { + return Task{}, fmt.Errorf("vdc not properly destroyed") + } + return *task, nil +} + +// Deletes the vdc and waits for the asynchronous task to complete. +func (vdc *Vdc) DeleteWait(force bool, recursive bool) error { + task, err := vdc.Delete(force, recursive) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("couldn't finish removing vdc %s", err) + } + return nil +} + +// Deprecated: use GetOrgVdcNetworkByName +func (vdc *Vdc) FindVDCNetwork(network string) (OrgVDCNetwork, error) { + + err := vdc.Refresh() + if err != nil { + return OrgVDCNetwork{}, fmt.Errorf("error refreshing vdc: %s", err) + } + for _, an := range vdc.Vdc.AvailableNetworks { + for _, reference := range an.Network { + if reference.Name == network { + orgNet := NewOrgVDCNetwork(vdc.client) + + _, err := vdc.client.ExecuteRequest(reference.HREF, http.MethodGet, + "", "error retrieving org vdc network: %s", nil, orgNet.OrgVDCNetwork) + + // The request was successful + return *orgNet, err + + } + } + } + + return OrgVDCNetwork{}, fmt.Errorf("can't find VDC Network: %s", network) +} + +// GetOrgVdcNetworkByHref returns an Org VDC Network reference if the network HREF matches an existing one. +// If no valid external network is found, it returns a nil Network reference and an error +func (vdc *Vdc) GetOrgVdcNetworkByHref(href string) (*OrgVDCNetwork, error) { + + orgNet := NewOrgVDCNetwork(vdc.client) + + _, err := vdc.client.ExecuteRequest(href, http.MethodGet, + "", "error retrieving org vdc network: %s", nil, orgNet.OrgVDCNetwork) + + // The request was successful + return orgNet, err +} + +// GetOrgVdcNetworkByName returns an Org VDC Network reference if the network name matches an existing one. +// If no valid external network is found, it returns a nil Network reference and an error +func (vdc *Vdc) GetOrgVdcNetworkByName(name string, refresh bool) (*OrgVDCNetwork, error) { + if refresh { + err := vdc.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing vdc: %s", err) + } + } + for _, an := range vdc.Vdc.AvailableNetworks { + for _, reference := range an.Network { + if reference.Name == name { + return vdc.GetOrgVdcNetworkByHref(reference.HREF) + } + } + } + + return nil, ErrorEntityNotFound +} + +// GetOrgVdcNetworkById returns an Org VDC Network reference if the network ID matches an existing one. 
+// If no valid external network is found, it returns a nil Network reference and an error +func (vdc *Vdc) GetOrgVdcNetworkById(id string, refresh bool) (*OrgVDCNetwork, error) { + if refresh { + err := vdc.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing vdc: %s", err) + } + } + for _, an := range vdc.Vdc.AvailableNetworks { + for _, reference := range an.Network { + // Some versions of vCD do not return an ID in the network reference + // We use equalIds to overcome this issue + if equalIds(id, reference.ID, reference.HREF) { + return vdc.GetOrgVdcNetworkByHref(reference.HREF) + } + } + } + + return nil, ErrorEntityNotFound +} + +// GetOrgVdcNetworkByNameOrId returns a VDC Network reference if either the network name or ID matches an existing one. +// If no valid external network is found, it returns a nil ExternalNetwork reference and an error +func (vdc *Vdc) GetOrgVdcNetworkByNameOrId(identifier string, refresh bool) (*OrgVDCNetwork, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return vdc.GetOrgVdcNetworkByName(name, refresh) } + getById := func(id string, refresh bool) (interface{}, error) { return vdc.GetOrgVdcNetworkById(id, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, false) + if entity == nil { + return nil, err + } + return entity.(*OrgVDCNetwork), err +} + +func (vdc *Vdc) FindStorageProfileReference(name string) (types.Reference, error) { + + err := vdc.Refresh() + if err != nil { + return types.Reference{}, fmt.Errorf("error refreshing vdc: %s", err) + } + for _, sp := range vdc.Vdc.VdcStorageProfiles.VdcStorageProfile { + if sp.Name == name { + return types.Reference{HREF: sp.HREF, Name: sp.Name, ID: sp.ID}, nil + } + } + return types.Reference{}, fmt.Errorf("can't find any VDC Storage_profiles") +} + +// GetDefaultStorageProfileReference should find the default storage profile for a VDC +// Deprecated: unused and implemented in the wrong way. 
Use adminVdc.GetDefaultStorageProfileReference instead +func (vdc *Vdc) GetDefaultStorageProfileReference(storageprofiles *types.QueryResultRecordsType) (types.Reference, error) { + + err := vdc.Refresh() + if err != nil { + return types.Reference{}, fmt.Errorf("error refreshing vdc: %s", err) + } + for _, spr := range storageprofiles.OrgVdcStorageProfileRecord { + if spr.IsDefaultStorageProfile { + return types.Reference{HREF: spr.HREF, Name: spr.Name}, nil + } + } + return types.Reference{}, fmt.Errorf("can't find Default VDC Storage_profile") +} + +// Deprecated: use GetEdgeGatewayByName +func (vdc *Vdc) FindEdgeGateway(edgegateway string) (EdgeGateway, error) { + + err := vdc.Refresh() + if err != nil { + return EdgeGateway{}, fmt.Errorf("error refreshing vdc: %s", err) + } + for _, av := range vdc.Vdc.Link { + if av.Rel == "edgeGateways" && av.Type == types.MimeQueryRecords { + + query := new(types.QueryResultEdgeGatewayRecordsType) + + _, err := vdc.client.ExecuteRequest(av.HREF, http.MethodGet, + "", "error querying edge gateways: %s", nil, query) + if err != nil { + return EdgeGateway{}, err + } + + var href string + + for _, edge := range query.EdgeGatewayRecord { + if edge.Name == edgegateway { + href = edge.HREF + } + } + + if href == "" { + return EdgeGateway{}, fmt.Errorf("can't find edge gateway with name: %s", edgegateway) + } + + edge := NewEdgeGateway(vdc.client) + + _, err = vdc.client.ExecuteRequest(href, http.MethodGet, + "", "error retrieving edge gateway: %s", nil, edge.EdgeGateway) + + // TODO - remove this if a solution is found or once 9.7 is deprecated + // vCD 9.7 has a bug and sometimes it fails to retrieve edge gateway with weird error. + // At this point in time the solution is to retry a few times as it does not fail to + // retrieve when retried. + // + // GitHUB issue - https://github.com/vmware/go-vcloud-director/issues/218 + if err != nil { + util.Logger.Printf("[DEBUG] vCD 9.7 is known to sometimes respond with error on edge gateway (%s) "+ + "retrieval. As a workaround this is done a few times before failing. Retrying: ", edgegateway) + for i := 1; i < 4 && err != nil; i++ { + time.Sleep(200 * time.Millisecond) + util.Logger.Printf("%d ", i) + _, err = vdc.client.ExecuteRequest(href, http.MethodGet, + "", "error retrieving edge gateway: %s", nil, edge.EdgeGateway) + } + util.Logger.Printf("\n") + } + + return *edge, err + + } + } + return EdgeGateway{}, fmt.Errorf("can't find Edge Gateway") + +} + +// GetEdgeGatewayByHref retrieves an edge gateway from VDC +// by querying directly its HREF. +// The name passed as parameter is only used for error reporting +func (vdc *Vdc) GetEdgeGatewayByHref(href string) (*EdgeGateway, error) { + if href == "" { + return nil, fmt.Errorf("empty edge gateway HREF") + } + + edge := NewEdgeGateway(vdc.client) + + _, err := vdc.client.ExecuteRequest(href, http.MethodGet, + "", "error retrieving edge gateway: %s", nil, edge.EdgeGateway) + + // TODO - remove this if a solution is found or once 9.7 is deprecated + // vCD 9.7 has a bug and sometimes it fails to retrieve edge gateway with weird error. + // At this point in time the solution is to retry a few times as it does not fail to + // retrieve when retried. + // + // GitHUB issue - https://github.com/vmware/go-vcloud-director/issues/218 + if err != nil { + util.Logger.Printf("[DEBUG] vCD 9.7 is known to sometimes respond with error on edge gateway " + + "retrieval. As a workaround this is done a few times before failing. 
Retrying:") + for i := 1; i < 4 && err != nil; i++ { + time.Sleep(200 * time.Millisecond) + util.Logger.Printf("%d ", i) + _, err = vdc.client.ExecuteRequest(href, http.MethodGet, + "", "error retrieving edge gateway: %s", nil, edge.EdgeGateway) + } + util.Logger.Printf("\n") + } + + if err != nil { + return nil, err + } + return edge, nil +} + +// QueryEdgeGatewayList returns a list of all the edge gateways in a VDC +func (vdc *Vdc) QueryEdgeGatewayList() ([]*types.QueryResultEdgeGatewayRecordType, error) { + results, err := vdc.client.cumulativeQuery(types.QtEdgeGateway, nil, map[string]string{ + "type": types.QtEdgeGateway, + "filter": fmt.Sprintf("orgVdcName==%s", url.QueryEscape(vdc.Vdc.Name)), + "filterEncoded": "true", + }) + if err != nil { + return nil, err + } + return results.Results.EdgeGatewayRecord, nil +} + +// GetEdgeGatewayRecordsType retrieves a list of edge gateways from VDC +// Deprecated: use QueryEdgeGatewayList instead +func (vdc *Vdc) GetEdgeGatewayRecordsType(refresh bool) (*types.QueryResultEdgeGatewayRecordsType, error) { + items, err := vdc.QueryEdgeGatewayList() + if err != nil { + return nil, fmt.Errorf("error retrieving edge gateway list: %s", err) + } + return &types.QueryResultEdgeGatewayRecordsType{ + Total: float64(len(items)), + EdgeGatewayRecord: items, + }, nil +} + +// GetEdgeGatewayByName search the VDC list of edge gateways for a given name. +// If the name matches, it returns a pointer to an edge gateway object. +// On failure, it returns a nil object and an error +func (vdc *Vdc) GetEdgeGatewayByName(name string, refresh bool) (*EdgeGateway, error) { + edgeGatewayList, err := vdc.QueryEdgeGatewayList() + if err != nil { + return nil, fmt.Errorf("error retrieving edge gateways list: %s", err) + } + + for _, edge := range edgeGatewayList { + if edge.Name == name { + return vdc.GetEdgeGatewayByHref(edge.HREF) + } + } + + return nil, ErrorEntityNotFound +} + +// GetEdgeGatewayById search VDC list of edge gateways for a given ID. +// If the id matches, it returns a pointer to an edge gateway object. +// On failure, it returns a nil object and an error +func (vdc *Vdc) GetEdgeGatewayById(id string, refresh bool) (*EdgeGateway, error) { + edgeGatewayList, err := vdc.QueryEdgeGatewayList() + if err != nil { + return nil, fmt.Errorf("error retrieving edge gateways list: %s", err) + } + + for _, edge := range edgeGatewayList { + if equalIds(id, "", edge.HREF) { + return vdc.GetEdgeGatewayByHref(edge.HREF) + } + } + + return nil, ErrorEntityNotFound +} + +// GetEdgeGatewayByNameOrId search the VDC list of edge gateways for a given name or ID. +// If the name or the ID match, it returns a pointer to an edge gateway object. 
+// On failure, it returns a nil object and an error +func (vdc *Vdc) GetEdgeGatewayByNameOrId(identifier string, refresh bool) (*EdgeGateway, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return vdc.GetEdgeGatewayByName(name, refresh) } + getById := func(id string, refresh bool) (interface{}, error) { return vdc.GetEdgeGatewayById(id, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, false) + if entity == nil { + return nil, err + } + return entity.(*EdgeGateway), err +} + +// ComposeRawVApp creates an empty vApp +// Deprecated: use CreateRawVApp instead +func (vdc *Vdc) ComposeRawVApp(name string, description string) error { + vcomp := &types.ComposeVAppParams{ + Ovf: types.XMLNamespaceOVF, + Xsi: types.XMLNamespaceXSI, + Xmlns: types.XMLNamespaceVCloud, + Deploy: false, + Name: name, + PowerOn: false, + Description: description, + } + + vdcHref, err := url.ParseRequestURI(vdc.Vdc.HREF) + if err != nil { + return fmt.Errorf("error getting vdc href: %s", err) + } + vdcHref.Path += "/action/composeVApp" + + // This call is wrong: /action/composeVApp returns a vApp, not a task + task, err := vdc.client.ExecuteTaskRequest(vdcHref.String(), http.MethodPost, + types.MimeComposeVappParams, "error instantiating a new vApp:: %s", vcomp) + if err != nil { + return fmt.Errorf("error executing task request: %s", err) + } + + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error performing task: %s", err) + } + + return nil +} + +// CreateRawVApp creates an empty vApp +func (vdc *Vdc) CreateRawVApp(name string, description string) (*VApp, error) { + vcomp := &types.ComposeVAppParams{ + Ovf: types.XMLNamespaceOVF, + Xsi: types.XMLNamespaceXSI, + Xmlns: types.XMLNamespaceVCloud, + Deploy: false, + Name: name, + PowerOn: false, + Description: description, + } + + vdcHref, err := url.ParseRequestURI(vdc.Vdc.HREF) + if err != nil { + return nil, fmt.Errorf("error getting vdc href: %s", err) + } + vdcHref.Path += "/action/composeVApp" + + var vAppContents types.VApp + + _, err = vdc.client.ExecuteRequest(vdcHref.String(), http.MethodPost, + types.MimeComposeVappParams, "error instantiating a new vApp:: %s", vcomp, &vAppContents) + if err != nil { + return nil, fmt.Errorf("error executing task request: %s", err) + } + + if vAppContents.Tasks != nil { + for _, innerTask := range vAppContents.Tasks.Task { + if innerTask != nil { + task := NewTask(vdc.client) + task.Task = innerTask + err = task.WaitTaskCompletion() + if err != nil { + return nil, fmt.Errorf("error performing task: %s", err) + } + } + } + } + + vapp := NewVApp(vdc.client) + vapp.VApp = &vAppContents + + err = vapp.Refresh() + if err != nil { + return nil, err + } + + err = vdc.Refresh() + if err != nil { + return nil, err + } + return vapp, nil +} + +// ComposeVApp creates a vapp with the given template, name, and description +// that uses the storageprofile and networks given. If you want all eulas +// to be accepted set acceptalleulas to true. Returns a successful task +// if completed successfully, otherwise returns an error and an empty task. 
+// Deprecated: bad implementation +func (vdc *Vdc) ComposeVApp(orgvdcnetworks []*types.OrgVDCNetwork, vapptemplate VAppTemplate, storageprofileref types.Reference, name string, description string, acceptalleulas bool) (Task, error) { + if vapptemplate.VAppTemplate.Children == nil || orgvdcnetworks == nil { + return Task{}, fmt.Errorf("can't compose a new vApp, objects passed are not valid") + } + + // Determine primary network connection index number. We normally depend on it being inherited from vApp template + // but in the case when vApp template does not have network card it would fail on the index being undefined. We + // set the value to 0 (first NIC instead) + primaryNetworkConnectionIndex := 0 + if vapptemplate.VAppTemplate.Children != nil && len(vapptemplate.VAppTemplate.Children.VM) > 0 && + vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection != nil { + primaryNetworkConnectionIndex = vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.PrimaryNetworkConnectionIndex + } + + // Build request XML + vcomp := &types.ComposeVAppParams{ + Ovf: types.XMLNamespaceOVF, + Xsi: types.XMLNamespaceXSI, + Xmlns: types.XMLNamespaceVCloud, + Deploy: false, + Name: name, + PowerOn: false, + Description: description, + InstantiationParams: &types.InstantiationParams{ + NetworkConfigSection: &types.NetworkConfigSection{ + Info: "Configuration parameters for logical networks", + }, + }, + AllEULAsAccepted: acceptalleulas, + SourcedItem: &types.SourcedCompositionItemParam{ + Source: &types.Reference{ + HREF: vapptemplate.VAppTemplate.Children.VM[0].HREF, + Name: vapptemplate.VAppTemplate.Children.VM[0].Name, + }, + InstantiationParams: &types.InstantiationParams{ + NetworkConnectionSection: &types.NetworkConnectionSection{ + Info: "Network config for sourced item", + PrimaryNetworkConnectionIndex: primaryNetworkConnectionIndex, + }, + }, + }, + } + for index, orgvdcnetwork := range orgvdcnetworks { + vcomp.InstantiationParams.NetworkConfigSection.NetworkConfig = append(vcomp.InstantiationParams.NetworkConfigSection.NetworkConfig, + types.VAppNetworkConfiguration{ + NetworkName: orgvdcnetwork.Name, + Configuration: &types.NetworkConfiguration{ + FenceMode: types.FenceModeBridged, + ParentNetwork: &types.Reference{ + HREF: orgvdcnetwork.HREF, + Name: orgvdcnetwork.Name, + Type: orgvdcnetwork.Type, + }, + }, + }, + ) + vcomp.SourcedItem.InstantiationParams.NetworkConnectionSection.NetworkConnection = append(vcomp.SourcedItem.InstantiationParams.NetworkConnectionSection.NetworkConnection, + &types.NetworkConnection{ + Network: orgvdcnetwork.Name, + NetworkConnectionIndex: index, + IsConnected: true, + IPAddressAllocationMode: types.IPAllocationModePool, + }, + ) + vcomp.SourcedItem.NetworkAssignment = append(vcomp.SourcedItem.NetworkAssignment, + &types.NetworkAssignment{ + InnerNetwork: orgvdcnetwork.Name, + ContainerNetwork: orgvdcnetwork.Name, + }, + ) + } + if storageprofileref.HREF != "" { + vcomp.SourcedItem.StorageProfile = &storageprofileref + } + + vdcHref, err := url.ParseRequestURI(vdc.Vdc.HREF) + if err != nil { + return Task{}, fmt.Errorf("error getting vdc href: %s", err) + } + vdcHref.Path += "/action/composeVApp" + + // Like ComposeRawVApp, this function returns a task, while it should be returning a vApp + // Since we don't use this function in terraform-provider-vcd, we are not going to + // replace it. 
+ return vdc.client.ExecuteTaskRequest(vdcHref.String(), http.MethodPost, + types.MimeComposeVappParams, "error instantiating a new vApp: %s", vcomp) +} + +// Deprecated: use vdc.GetVAppByName instead +func (vdc *Vdc) FindVAppByName(vapp string) (VApp, error) { + + err := vdc.Refresh() + if err != nil { + return VApp{}, fmt.Errorf("error refreshing vdc: %s", err) + } + + for _, resents := range vdc.Vdc.ResourceEntities { + for _, resent := range resents.ResourceEntity { + + if resent.Name == vapp && resent.Type == "application/vnd.vmware.vcloud.vApp+xml" { + + newVapp := NewVApp(vdc.client) + + _, err := vdc.client.ExecuteRequest(resent.HREF, http.MethodGet, + "", "error retrieving vApp: %s", nil, newVapp.VApp) + + return *newVapp, err + + } + } + } + return VApp{}, fmt.Errorf("can't find vApp: %s", vapp) +} + +// Deprecated: use vapp.GetVMByName instead +func (vdc *Vdc) FindVMByName(vapp VApp, vm string) (VM, error) { + + err := vdc.Refresh() + if err != nil { + return VM{}, fmt.Errorf("error refreshing vdc: %s", err) + } + + err = vapp.Refresh() + if err != nil { + return VM{}, fmt.Errorf("error refreshing vApp: %s", err) + } + + //vApp Might Not Have Any VMs + + if vapp.VApp.Children == nil { + return VM{}, fmt.Errorf("VApp Has No VMs") + } + + util.Logger.Printf("[TRACE] Looking for VM: %s", vm) + for _, child := range vapp.VApp.Children.VM { + + util.Logger.Printf("[TRACE] Found: %s", child.Name) + if child.Name == vm { + + newVm := NewVM(vdc.client) + + _, err := vdc.client.ExecuteRequest(child.HREF, http.MethodGet, + "", "error retrieving vm: %s", nil, newVm.VM) + + return *newVm, err + } + + } + util.Logger.Printf("[TRACE] Couldn't find VM: %s", vm) + return VM{}, fmt.Errorf("can't find vm: %s", vm) +} + +// Find vm using vApp name and VM name. Returns VMRecord query return type +func (vdc *Vdc) QueryVM(vappName, vmName string) (VMRecord, error) { + + if vmName == "" { + return VMRecord{}, errors.New("error querying vm name is empty") + } + + if vappName == "" { + return VMRecord{}, errors.New("error querying vapp name is empty") + } + + typeMedia := "vm" + if vdc.client.IsSysAdmin { + typeMedia = "adminVM" + } + + results, err := vdc.QueryWithNotEncodedParams(nil, map[string]string{"type": typeMedia, + "filter": "name==" + url.QueryEscape(vmName) + ";containerName==" + url.QueryEscape(vappName), + "filterEncoded": "true"}) + if err != nil { + return VMRecord{}, fmt.Errorf("error querying vm %s", err) + } + + vmResults := results.Results.VMRecord + if vdc.client.IsSysAdmin { + vmResults = results.Results.AdminVMRecord + } + + newVM := NewVMRecord(vdc.client) + + if len(vmResults) == 1 { + newVM.VM = vmResults[0] + } else { + return VMRecord{}, fmt.Errorf("found results %d", len(vmResults)) + } + + return *newVM, nil +} + +// Deprecated: use vdc.GetVAppById instead +func (vdc *Vdc) FindVAppByID(vappid string) (VApp, error) { + + // Horrible hack to fetch a vapp with its id. 
+ // urn:vcloud:vapp:00000000-0000-0000-0000-000000000000 + + err := vdc.Refresh() + if err != nil { + return VApp{}, fmt.Errorf("error refreshing vdc: %s", err) + } + + urnslice := strings.SplitAfter(vappid, ":") + urnid := urnslice[len(urnslice)-1] + + for _, resents := range vdc.Vdc.ResourceEntities { + for _, resent := range resents.ResourceEntity { + + hrefslice := strings.SplitAfter(resent.HREF, "/") + hrefslice = strings.SplitAfter(hrefslice[len(hrefslice)-1], "-") + res := strings.Join(hrefslice[1:], "") + + if res == urnid && resent.Type == "application/vnd.vmware.vcloud.vApp+xml" { + + newVapp := NewVApp(vdc.client) + + _, err := vdc.client.ExecuteRequest(resent.HREF, http.MethodGet, + "", "error retrieving vApp: %s", nil, newVapp.VApp) + + return *newVapp, err + + } + } + } + return VApp{}, fmt.Errorf("can't find vApp") + +} + +// FindMediaImage returns media image found in system using `name` as query. +// Can find a few of them if media with same name exist in different catalogs. +// Deprecated: Use catalog.GetMediaByName() +func (vdc *Vdc) FindMediaImage(mediaName string) (MediaItem, error) { + util.Logger.Printf("[TRACE] Querying medias by name\n") + + mediaResults, err := queryMediaWithFilter(vdc, + fmt.Sprintf("name==%s", url.QueryEscape(mediaName))) + if err != nil { + return MediaItem{}, err + } + + newMediaItem := NewMediaItem(vdc) + + if len(mediaResults) == 1 { + newMediaItem.MediaItem = mediaResults[0] + } + + if len(mediaResults) == 0 { + return MediaItem{}, nil + } + + if len(mediaResults) > 1 { + return MediaItem{}, errors.New("found more than result") + } + + util.Logger.Printf("[TRACE] Found media record by name: %#v \n", mediaResults[0]) + return *newMediaItem, nil +} + +// GetVappByHref returns a vApp reference by running a vCD API call +// If no valid vApp is found, it returns a nil VApp reference and an error +func (vdc *Vdc) GetVAppByHref(vappHref string) (*VApp, error) { + + newVapp := NewVApp(vdc.client) + + _, err := vdc.client.ExecuteRequest(vappHref, http.MethodGet, + "", "error retrieving vApp: %s", nil, newVapp.VApp) + + if err != nil { + return nil, err + } + return newVapp, nil +} + +// GetVappByName returns a vApp reference if the vApp Name matches an existing one. +// If no valid vApp is found, it returns a nil VApp reference and an error +func (vdc *Vdc) GetVAppByName(vappName string, refresh bool) (*VApp, error) { + + if refresh { + err := vdc.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing VDC: %s", err) + } + } + + for _, resourceEntities := range vdc.Vdc.ResourceEntities { + for _, resourceReference := range resourceEntities.ResourceEntity { + if resourceReference.Name == vappName && resourceReference.Type == "application/vnd.vmware.vcloud.vApp+xml" { + return vdc.GetVAppByHref(resourceReference.HREF) + } + } + } + return nil, ErrorEntityNotFound +} + +// GetVappById returns a vApp reference if the vApp ID matches an existing one. 
+// If no valid vApp is found, it returns a nil VApp reference and an error +func (vdc *Vdc) GetVAppById(id string, refresh bool) (*VApp, error) { + + if refresh { + err := vdc.Refresh() + if err != nil { + return nil, fmt.Errorf("error refreshing VDC: %s", err) + } + } + + for _, resourceEntities := range vdc.Vdc.ResourceEntities { + for _, resourceReference := range resourceEntities.ResourceEntity { + if equalIds(id, resourceReference.ID, resourceReference.HREF) { + return vdc.GetVAppByHref(resourceReference.HREF) + } + } + } + return nil, ErrorEntityNotFound +} + +// GetVappByNameOrId returns a vApp reference if either the vApp name or ID matches an existing one. +// If no valid vApp is found, it returns a nil VApp reference and an error +func (vdc *Vdc) GetVAppByNameOrId(identifier string, refresh bool) (*VApp, error) { + getByName := func(name string, refresh bool) (interface{}, error) { return vdc.GetVAppByName(name, refresh) } + getById := func(id string, refresh bool) (interface{}, error) { return vdc.GetVAppById(id, refresh) } + entity, err := getEntityByNameOrId(getByName, getById, identifier, false) + if entity == nil { + return nil, err + } + return entity.(*VApp), err +} + +// buildNsxvNetworkServiceEndpointURL uses vDC HREF as a base to derive NSX-V based "network +// services" endpoint (eg: https://_hostname_or_ip_/network/services + optionalSuffix) +func (vdc *Vdc) buildNsxvNetworkServiceEndpointURL(optionalSuffix string) (string, error) { + apiEndpoint, err := url.ParseRequestURI(vdc.Vdc.HREF) + if err != nil { + return "", fmt.Errorf("unable to process vDC URL: %s", err) + } + + hostname := apiEndpoint.Scheme + "://" + apiEndpoint.Host + "/network/services" + + if optionalSuffix != "" { + return hostname + optionalSuffix, nil + } + + return hostname, nil +} + +// QueryMediaList retrieves a list of media items for the VDC +func (vdc *Vdc) QueryMediaList() ([]*types.MediaRecordType, error) { + return getExistingMedia(vdc) +} + +// QueryVappVmTemplate Finds VM template using catalog name, vApp template name, VN name in template. 
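A short sketch of resolving a vApp through the getters above, assuming an authenticated `*govcd.Vdc`; the identifier may be either a name or a URN (placeholder names):

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// findVApp resolves a vApp by name or URN and reports whether it exists.
func findVApp(vdc *govcd.Vdc, identifier string) (*govcd.VApp, error) {
	vapp, err := vdc.GetVAppByNameOrId(identifier, true)
	if err == govcd.ErrorEntityNotFound {
		return nil, fmt.Errorf("vApp %s not found in VDC %s", identifier, vdc.Vdc.Name)
	}
	if err != nil {
		return nil, fmt.Errorf("error retrieving vApp %s: %s", identifier, err)
	}
	fmt.Printf("found vApp %s (%s)\n", vapp.VApp.Name, vapp.VApp.HREF)
	return vapp, nil
}
```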
Returns types.QueryResultVMRecordType +func (vdc *Vdc) QueryVappVmTemplate(catalogName, vappTemplateName, vmNameInTemplate string) (*types.QueryResultVMRecordType, error) { + + queryType := "vm" + if vdc.client.IsSysAdmin { + queryType = "adminVM" + } + + // this allows to query deployed and not deployed templates + results, err := vdc.QueryWithNotEncodedParams(nil, map[string]string{"type": queryType, + "filter": "catalogName==" + url.QueryEscape(catalogName) + ";containerName==" + url.QueryEscape(vappTemplateName) + ";name==" + url.QueryEscape(vmNameInTemplate) + + ";isVAppTemplate==true;status!=FAILED_CREATION;status!=UNKNOWN;status!=UNRECOGNIZED;status!=UNRESOLVED&links=true;", + "filterEncoded": "true"}) + if err != nil { + return nil, fmt.Errorf("error quering all vApp templates: %s", err) + } + + vmResults := results.Results.VMRecord + if vdc.client.IsSysAdmin { + vmResults = results.Results.AdminVMRecord + } + + if len(vmResults) == 0 { + return nil, fmt.Errorf("[QueryVappVmTemplate] did not find any result with catalog name: %s, "+ + "vApp template name: %s, VM name: %s", catalogName, vappTemplateName, vmNameInTemplate) + } + + if len(vmResults) > 1 { + return nil, fmt.Errorf("[QueryVappVmTemplate] found more than 1 result: %d with with catalog name: %s, "+ + "vApp template name: %s, VM name: %s", len(vmResults), catalogName, vappTemplateName, vmNameInTemplate) + } + + return vmResults[0], nil +} + +// getLinkHref returns a link HREF for a wanted combination of rel and type +func (vdc *Vdc) getLinkHref(rel, linkType string) string { + for _, link := range vdc.Vdc.Link { + if link.Rel == rel && link.Type == linkType { + return link.HREF + } + } + return "" +} + +// GetVappList returns the list of vApps for a VDC +func (vdc *Vdc) GetVappList() []*types.ResourceReference { + var list []*types.ResourceReference + for _, resourceEntities := range vdc.Vdc.ResourceEntities { + for _, resourceReference := range resourceEntities.ResourceEntity { + if resourceReference.Type == types.MimeVApp { + list = append(list, resourceReference) + } + } + } + return list +} + +// CreateStandaloneVmAsync starts a standalone VM creation without a template, returning a task +func (vdc *Vdc) CreateStandaloneVmAsync(params *types.CreateVmParams) (Task, error) { + util.Logger.Printf("[TRACE] Vdc.CreateStandaloneVmAsync - Creating VM ") + + if vdc.Vdc.HREF == "" { + return Task{}, fmt.Errorf("cannot create VM, Object VDC is empty") + } + + href := "" + for _, link := range vdc.Vdc.Link { + if link.Type == types.MimeCreateVmParams && link.Rel == "add" { + href = link.HREF + break + } + } + if href == "" { + return Task{}, fmt.Errorf("error retrieving VM creation link from VDC %s", vdc.Vdc.Name) + } + if params == nil { + return Task{}, fmt.Errorf("empty parameters passed to standalone VM creation") + } + params.XmlnsOvf = types.XMLNamespaceOVF + + return vdc.client.ExecuteTaskRequest(href, http.MethodPost, types.MimeCreateVmParams, "error creating standalone VM: %s", params) +} + +// getVmFromTask finds a VM from a running standalone VM creation task +// It retrieves the VM owner (the hidden vApp), and from that one finds the new VM +func (vdc *Vdc) getVmFromTask(task Task, name string) (*VM, error) { + owner := task.Task.Owner.HREF + if owner == "" { + return nil, fmt.Errorf("task owner is null for VM %s", name) + } + vapp, err := vdc.GetVAppByHref(owner) + if err != nil { + return nil, err + } + if vapp.VApp.Children == nil { + return nil, ErrorEntityNotFound + } + if len(vapp.VApp.Children.VM) == 0 { + 
return nil, fmt.Errorf("vApp %s contains no VMs", vapp.VApp.Name) + } + if len(vapp.VApp.Children.VM) > 1 { + return nil, fmt.Errorf("vApp %s contains more than one VM", vapp.VApp.Name) + } + for _, child := range vapp.VApp.Children.VM { + util.Logger.Printf("[TRACE] Looking at: %s", child.Name) + return vapp.client.GetVMByHref(child.HREF) + } + return nil, ErrorEntityNotFound +} + +// CreateStandaloneVm creates a standalone VM without a template +func (vdc *Vdc) CreateStandaloneVm(params *types.CreateVmParams) (*VM, error) { + + task, err := vdc.CreateStandaloneVmAsync(params) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, err + } + return vdc.getVmFromTask(task, params.Name) +} + +// QueryVmByName finds a standalone VM by name +// The search fails either if there are more VMs with the wanted name, or if there are none +// It can also retrieve a standard VM (created from vApp) +func (vdc *Vdc) QueryVmByName(name string) (*VM, error) { + vmList, err := vdc.QueryVmList(types.VmQueryFilterOnlyDeployed) + if err != nil { + return nil, err + } + var foundVM []*types.QueryResultVMRecordType + for _, vm := range vmList { + if vm.Name == name { + foundVM = append(foundVM, vm) + } + } + if len(foundVM) == 0 { + return nil, ErrorEntityNotFound + } + if len(foundVM) > 1 { + return nil, fmt.Errorf("more than one VM found with name %s", name) + } + return vdc.client.GetVMByHref(foundVM[0].HREF) +} + +// QueryVmById retrieves a standalone VM by ID in an Org +// It can also retrieve a standard VM (created from vApp) +func (org *Org) QueryVmById(id string) (*VM, error) { + return queryVmById(id, org.client, org.QueryVmList) +} + +// QueryVmById retrieves a standalone VM by ID in a Vdc +// It can also retrieve a standard VM (created from vApp) +func (vdc *Vdc) QueryVmById(id string) (*VM, error) { + return queryVmById(id, vdc.client, vdc.QueryVmList) +} + +// queryVmListFunc +type queryVmListFunc func(filter types.VmQueryFilter) ([]*types.QueryResultVMRecordType, error) + +// queryVmById is shared between org.QueryVmById and vdc.QueryVmById which allow to search for VM +// in different scope (Org or VDC) +func queryVmById(id string, client *Client, queryFunc queryVmListFunc) (*VM, error) { + vmList, err := queryFunc(types.VmQueryFilterOnlyDeployed) + if err != nil { + return nil, err + } + var foundVM []*types.QueryResultVMRecordType + for _, vm := range vmList { + if equalIds(id, vm.ID, vm.HREF) { + foundVM = append(foundVM, vm) + } + } + if len(foundVM) == 0 { + return nil, ErrorEntityNotFound + } + if len(foundVM) > 1 { + return nil, fmt.Errorf("more than one VM found with ID %s", id) + } + return client.GetVMByHref(foundVM[0].HREF) +} + +// CreateStandaloneVMFromTemplateAsync starts a standalone VM creation using a template +func (vdc *Vdc) CreateStandaloneVMFromTemplateAsync(params *types.InstantiateVmTemplateParams) (Task, error) { + + util.Logger.Printf("[TRACE] Vdc.CreateStandaloneVMFromTemplateAsync - Creating VM") + + if vdc.Vdc.HREF == "" { + return Task{}, fmt.Errorf("cannot create VM, provided VDC is empty") + } + + href := "" + for _, link := range vdc.Vdc.Link { + if link.Type == types.MimeInstantiateVmTemplateParams && link.Rel == "add" { + href = link.HREF + break + } + } + if href == "" { + return Task{}, fmt.Errorf("error retrieving VM instantiate from template link from VDC %s", vdc.Vdc.Name) + } + + if params.Name == "" { + return Task{}, fmt.Errorf("[CreateStandaloneVMFromTemplateAsync] missing VM name") + } + if 
params.SourcedVmTemplateItem == nil { + return Task{}, fmt.Errorf("[CreateStandaloneVMFromTemplateAsync] missing SourcedVmTemplateItem") + } + if params.SourcedVmTemplateItem.Source == nil { + return Task{}, fmt.Errorf("[CreateStandaloneVMFromTemplateAsync] missing vApp template Source") + } + if params.SourcedVmTemplateItem.Source.HREF == "" { + return Task{}, fmt.Errorf("[CreateStandaloneVMFromTemplateAsync] empty HREF in vApp template Source") + } + params.XmlnsOvf = types.XMLNamespaceOVF + + return vdc.client.ExecuteTaskRequest(href, http.MethodPost, types.MimeInstantiateVmTemplateParams, "error creating standalone VM from template: %s", params) +} + +// CreateStandaloneVMFromTemplate creates a standalone VM from a template +func (vdc *Vdc) CreateStandaloneVMFromTemplate(params *types.InstantiateVmTemplateParams) (*VM, error) { + + task, err := vdc.CreateStandaloneVMFromTemplateAsync(params) + if err != nil { + return nil, err + } + err = task.WaitTaskCompletion() + if err != nil { + return nil, err + } + return vdc.getVmFromTask(task, params.Name) +} + +// GetCapabilities allows to retrieve a list of VDC capabilities. It has a list of values. Some particularly useful are: +// * networkProvider - overlay stack responsible for providing network functionality. (NSX_V or NSX_T) +// * crossVdc - supports cross vDC network creation +func (vdc *Vdc) GetCapabilities() ([]types.VdcCapability, error) { + if vdc.Vdc.ID == "" { + return nil, fmt.Errorf("VDC ID must be set to get capabilities") + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcCapabilities + minimumApiVersion, err := vdc.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := vdc.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, url.QueryEscape(vdc.Vdc.ID))) + if err != nil { + return nil, err + } + + capabilities := make([]types.VdcCapability, 0) + err = vdc.client.OpenApiGetAllItems(minimumApiVersion, urlRef, nil, &capabilities, nil) + if err != nil { + return nil, err + } + return capabilities, nil +} + +// IsNsxt is a convenience function to check if VDC is backed by NSX-T pVdc +// If error occurs - it returns false +func (vdc *Vdc) IsNsxt() bool { + vdcCapabilities, err := vdc.GetCapabilities() + if err != nil { + return false + } + + networkProviderCapability := getCapabilityValue(vdcCapabilities, "networkProvider") + return networkProviderCapability == types.VdcCapabilityNetworkProviderNsxt +} + +// IsNsxv is a convenience function to check if VDC is backed by NSX-V pVdc +// If error occurs - it returns false +func (vdc *Vdc) IsNsxv() bool { + vdcCapabilities, err := vdc.GetCapabilities() + if err != nil { + return false + } + + networkProviderCapability := getCapabilityValue(vdcCapabilities, "networkProvider") + return networkProviderCapability == types.VdcCapabilityNetworkProviderNsxv +} + +// getCapabilityValue helps to lookup a specific capability in []types.VdcCapability by provided fieldName +func getCapabilityValue(capabilities []types.VdcCapability, fieldName string) string { + for _, field := range capabilities { + if field.Name == fieldName { + return field.Value.(string) + } + } + + return "" +} + +func (vdc *Vdc) getParentOrg() (organization, error) { + for _, vdcLink := range vdc.Vdc.Link { + if vdcLink.Rel != "up" { + continue + } + switch vdcLink.Type { + case types.MimeOrg: + org, err := getOrgByHref(vdc.client, vdcLink.HREF) + if err != nil { + return nil, err + } + return org, nil + case types.MimeAdminOrg: + adminOrg, err := 
getAdminOrgByHref(vdc.client, vdcLink.HREF) + if err != nil { + return nil, err + } + return adminOrg, nil + + default: + continue + } + } + return nil, fmt.Errorf("no parent found for VDC %s", vdc.Vdc.Name) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vdc_group.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vdc_group.go new file mode 100644 index 000000000..52a84366e --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vdc_group.go @@ -0,0 +1,502 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package govcd + +import ( + "fmt" + "github.com/vmware/go-vcloud-director/v2/types/v56" + "net/url" +) + +// VdcGroup is a structure defining a VdcGroup in Organization +type VdcGroup struct { + VdcGroup *types.VdcGroup + Href string + client *Client + parent organization +} + +// CreateNsxtVdcGroup create NSX-T VDC group with provided VDC IDs. +// More generic creation method available also - CreateVdcGroup +func (adminOrg *AdminOrg) CreateNsxtVdcGroup(name, description, startingVdcId string, participatingVdcIds []string) (*VdcGroup, error) { + participatingVdcs, err := composeParticipatingOrgVdcs(adminOrg, startingVdcId, participatingVdcIds) + if err != nil { + return nil, err + } + + vdcGroupConfig := &types.VdcGroup{} + vdcGroupConfig.OrgId = adminOrg.orgId() + vdcGroupConfig.Name = name + vdcGroupConfig.Description = description + vdcGroupConfig.ParticipatingOrgVdcs = participatingVdcs + vdcGroupConfig.LocalEgress = false + vdcGroupConfig.UniversalNetworkingEnabled = false + vdcGroupConfig.NetworkProviderType = "NSX_T" + vdcGroupConfig.Type = "LOCAL" + vdcGroupConfig.ParticipatingOrgVdcs = participatingVdcs + return adminOrg.CreateVdcGroup(vdcGroupConfig) +} + +// composeParticipatingOrgVdcs converts fetched candidate VDCs to []types.ParticipatingOrgVdcs +// returns error also in case participatingVdcId not found as candidate VDC. +func composeParticipatingOrgVdcs(adminOrg *AdminOrg, startingVdcId string, participatingVdcIds []string) ([]types.ParticipatingOrgVdcs, error) { + candidateVdcs, err := adminOrg.GetAllNsxtVdcGroupCandidates(startingVdcId, nil) + if err != nil { + return nil, err + } + participatingVdcs := []types.ParticipatingOrgVdcs{} + var foundParticipatingVdcsIds []string + for _, candidateVdc := range candidateVdcs { + if contains(candidateVdc.Id, participatingVdcIds) { + participatingVdcs = append(participatingVdcs, types.ParticipatingOrgVdcs{ + OrgRef: candidateVdc.OrgRef, + SiteRef: candidateVdc.SiteRef, + VdcRef: types.OpenApiReference{ + ID: candidateVdc.Id, + }, + FaultDomainTag: candidateVdc.FaultDomainTag, + NetworkProviderScope: candidateVdc.NetworkProviderScope, + }) + foundParticipatingVdcsIds = append(foundParticipatingVdcsIds, candidateVdc.Id) + } + } + + if len(participatingVdcs) != len(participatingVdcIds) { + var notFoundVdcs []string + for _, participatingVdcId := range participatingVdcIds { + if !contains(participatingVdcId, foundParticipatingVdcsIds) { + notFoundVdcs = append(notFoundVdcs, participatingVdcId) + } + } + return nil, fmt.Errorf("VDC IDs are not found as Candidate VDCs: %s", notFoundVdcs) + } + + return participatingVdcs, nil +} + +// contains tells whether slice of string contains item. 
+func contains(item string, slice []string) bool { + for _, n := range slice { + if item == n { + return true + } + } + return false +} + +// CreateVdcGroup create VDC group with provided VDC ref. +// Only supports NSX-T VDCs. +func (adminOrg *AdminOrg) CreateVdcGroup(vdcGroup *types.VdcGroup) (*VdcGroup, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + return createVdcGroup(adminOrg, vdcGroup, getTenantContextHeader(tenantContext)) +} + +// createVdcGroup create VDC group with provided VDC ref. +// Only supports NSX-T VDCs. +func createVdcGroup(adminOrg *AdminOrg, vdcGroup *types.VdcGroup, + additionalHeader map[string]string) (*VdcGroup, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroups + apiVersion, err := adminOrg.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := adminOrg.client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + typeResponse := &VdcGroup{ + VdcGroup: &types.VdcGroup{}, + client: adminOrg.client, + Href: urlRef.String(), + parent: adminOrg, + } + + err = adminOrg.client.OpenApiPostItem(apiVersion, urlRef, nil, + vdcGroup, typeResponse.VdcGroup, additionalHeader) + if err != nil { + return nil, err + } + + return typeResponse, nil +} + +// GetAllNsxtVdcGroupCandidates returns NSXT candidate VDCs for VDC group +func (adminOrg *AdminOrg) GetAllNsxtVdcGroupCandidates(startingVdcId string, queryParameters url.Values) ([]*types.CandidateVdc, error) { + queryParams := copyOrNewUrlValues(queryParameters) + queryParams = queryParameterFilterAnd("_context==LOCAL", queryParams) + queryParams = queryParameterFilterAnd(fmt.Sprintf("_context==%s", startingVdcId), queryParams) + queryParams.Add("filterEncoded", "true") + queryParams.Add("links", "true") + return adminOrg.GetAllVdcGroupCandidates(queryParams) +} + +// GetAllVdcGroupCandidates returns candidate VDCs for VDC group +func (adminOrg *AdminOrg) GetAllVdcGroupCandidates(queryParameters url.Values) ([]*types.CandidateVdc, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroupsCandidateVdcs + minimumApiVersion, err := adminOrg.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := adminOrg.client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + responses := []*types.CandidateVdc{} + err = adminOrg.client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &responses, getTenantContextHeader(tenantContext)) + if err != nil { + return nil, err + } + + return responses, nil +} + +// Delete deletes VDC group +func (vdcGroup *VdcGroup) Delete() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroups + minimumApiVersion, err := vdcGroup.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if vdcGroup.VdcGroup.Id == "" { + return fmt.Errorf("cannot delete VDC group without id") + } + + urlRef, err := vdcGroup.client.OpenApiBuildEndpoint(endpoint, vdcGroup.VdcGroup.Id) + if err != nil { + return err + } + + err = vdcGroup.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting VDC group: %s", err) + } + + return nil +} + +// GetAllVdcGroups retrieves all VDC groups. 
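A brief sketch of creating and removing a local NSX-T VDC group with the functions above, assuming an `*govcd.AdminOrg` from an authenticated system administrator session and valid participating VDC IDs (all names are placeholders):

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// createTemporaryVdcGroup creates a local NSX-T VDC group and removes it again.
func createTemporaryVdcGroup(adminOrg *govcd.AdminOrg, startingVdcId string, vdcIds []string) error {
	vdcGroup, err := adminOrg.CreateNsxtVdcGroup("demo-group", "created by example", startingVdcId, vdcIds)
	if err != nil {
		return fmt.Errorf("error creating VDC group: %s", err)
	}
	fmt.Printf("created VDC group %s\n", vdcGroup.VdcGroup.Id)

	if err := vdcGroup.Delete(); err != nil {
		return fmt.Errorf("error deleting VDC group: %s", err)
	}
	return nil
}
```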
Query parameters can be supplied to perform additional filtering +func (adminOrg *AdminOrg) GetAllVdcGroups(queryParameters url.Values) ([]*VdcGroup, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroups + minimumApiVersion, err := adminOrg.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := adminOrg.client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + responses := []*types.VdcGroup{} + err = adminOrg.client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &responses, getTenantContextHeader(tenantContext)) + if err != nil { + return nil, err + } + + var wrappedVdcGroups []*VdcGroup + for _, response := range responses { + urlRef, err := adminOrg.client.OpenApiBuildEndpoint(endpoint, response.Id) + if err != nil { + return nil, err + } + wrappedVdcGroup := &VdcGroup{ + VdcGroup: response, + client: adminOrg.client, + Href: urlRef.String(), + parent: adminOrg, + } + wrappedVdcGroups = append(wrappedVdcGroups, wrappedVdcGroup) + } + + return wrappedVdcGroups, nil +} + +// GetVdcGroupByName retrieves VDC group by given name +// When the name contains commas, semicolons or asterisks, the encoding is rejected by the API in VCD 10.2 version. +// For this reason, when one or more commas, semicolons or asterisks are present we run the search brute force, +// by fetching all VDC groups and comparing the names. Yet, this not needed anymore in VCD 10.3 version. +// Also, url.QueryEscape as well as url.Values.Encode() both encode the space as a + character. So we use +// search brute force too. Reference to issue: +// https://github.com/golang/go/issues/4013 +// https://github.com/czos/goamz/pull/11/files +func (adminOrg *AdminOrg) GetVdcGroupByName(name string) (*VdcGroup, error) { + slowSearch, params, err := shouldDoSlowSearch("name", name, adminOrg.client) + if err != nil { + return nil, err + } + + var foundVdcGroups []*VdcGroup + vdcGroups, err := adminOrg.GetAllVdcGroups(params) + if err != nil { + return nil, err + } + if len(vdcGroups) == 0 { + return nil, ErrorEntityNotFound + } + foundVdcGroups = append(foundVdcGroups, vdcGroups[0]) + + if slowSearch { + foundVdcGroups = nil + for _, vdcGroup := range vdcGroups { + if vdcGroup.VdcGroup.Name == name { + foundVdcGroups = append(foundVdcGroups, vdcGroup) + } + } + if len(foundVdcGroups) == 0 { + return nil, ErrorEntityNotFound + } + if len(foundVdcGroups) > 1 { + return nil, fmt.Errorf("more than one VDC group found with name '%s'", name) + } + } + + if len(vdcGroups) > 1 && !slowSearch { + return nil, fmt.Errorf("more than one VDC group found with name '%s'", name) + } + + return foundVdcGroups[0], nil +} + +// GetVdcGroupById Returns VDC group using provided ID +func (adminOrg *AdminOrg) GetVdcGroupById(id string) (*VdcGroup, error) { + tenantContext, err := adminOrg.getTenantContext() + if err != nil { + return nil, err + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroups + minimumApiVersion, err := adminOrg.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty VDC group ID") + } + + urlRef, err := adminOrg.client.OpenApiBuildEndpoint(endpoint, id) + if err != nil { + return nil, err + } + + vdcGroup := &VdcGroup{ + VdcGroup: &types.VdcGroup{}, + client: adminOrg.client, + Href: urlRef.String(), + 
parent: adminOrg, + } + + err = adminOrg.client.OpenApiGetItem(minimumApiVersion, urlRef, nil, vdcGroup.VdcGroup, getTenantContextHeader(tenantContext)) + if err != nil { + return nil, err + } + + return vdcGroup, nil +} + +// Update updates existing Vdc group. Allows changing only name and description and participating VCDs +// Not restrictive update method also available - GenericUpdate +func (vdcGroup *VdcGroup) Update(name, description string, participatingOrgVddIs []string) (*VdcGroup, error) { + + vdcGroup.VdcGroup.Name = name + vdcGroup.VdcGroup.Description = description + + participatingOrgVdcs, err := composeParticipatingOrgVdcs(vdcGroup.parent.fullObject().(*AdminOrg), vdcGroup.VdcGroup.Id, participatingOrgVddIs) + if err != nil { + return nil, err + } + vdcGroup.VdcGroup.ParticipatingOrgVdcs = participatingOrgVdcs + + return vdcGroup.GenericUpdate() +} + +// GenericUpdate updates existing Vdc group. API allows changing only name and description and participating VCDs +func (vdcGroup *VdcGroup) GenericUpdate() (*VdcGroup, error) { + tenantContext, err := vdcGroup.getTenantContext() + if err != nil { + return nil, err + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroups + minimumApiVersion, err := vdcGroup.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if vdcGroup.VdcGroup.Id == "" { + return nil, fmt.Errorf("cannot update VDC group without id") + } + + urlRef, err := vdcGroup.client.OpenApiBuildEndpoint(endpoint, vdcGroup.VdcGroup.Id) + if err != nil { + return nil, err + } + + returnVdcGroup := &VdcGroup{ + VdcGroup: &types.VdcGroup{}, + client: vdcGroup.client, + Href: vdcGroup.Href, + parent: vdcGroup.parent, + } + + err = vdcGroup.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, vdcGroup.VdcGroup, + returnVdcGroup.VdcGroup, getTenantContextHeader(tenantContext)) + if err != nil { + return nil, fmt.Errorf("error updating VDC group: %s", err) + } + + return returnVdcGroup, nil +} + +// UpdateDfwPolicies updates distributed firewall policies +func (vdcGroup *VdcGroup) UpdateDfwPolicies(dfwPolicies types.DfwPolicies) (*VdcGroup, error) { + tenantContext, err := vdcGroup.getTenantContext() + if err != nil { + return nil, err + } + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroupsDfwPolicies + minimumApiVersion, err := vdcGroup.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if vdcGroup.VdcGroup.Id == "" { + return nil, fmt.Errorf("cannot update VDC group Dfw policies without id") + } + + urlRef, err := vdcGroup.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, vdcGroup.VdcGroup.Id)) + if err != nil { + return nil, err + } + + err = vdcGroup.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, dfwPolicies, + nil, getTenantContextHeader(tenantContext)) + if err != nil { + return nil, fmt.Errorf("error updating VDC group Dfw policies: %s", err) + } + + adminOrg := vdcGroup.parent.fullObject().(*AdminOrg) + return adminOrg.GetVdcGroupById(vdcGroup.VdcGroup.Id) +} + +// UpdateDefaultDfwPolicies updates distributed firewall default policies +func (vdcGroup *VdcGroup) UpdateDefaultDfwPolicies(defaultDfwPolicies types.DefaultPolicy) (*VdcGroup, error) { + tenantContext, err := vdcGroup.getTenantContext() + if err != nil { + return nil, err + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroupsDfwDefaultPolicies + minimumApiVersion, err := vdcGroup.client.checkOpenApiEndpointCompatibility(endpoint) + if err 
!= nil { + return nil, err + } + + if vdcGroup.VdcGroup.Id == "" { + return nil, fmt.Errorf("cannot update VDC group default DFW policies without id") + } + + urlRef, err := vdcGroup.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, vdcGroup.VdcGroup.Id)) + if err != nil { + return nil, err + } + + err = vdcGroup.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, defaultDfwPolicies, + nil, getTenantContextHeader(tenantContext)) + if err != nil { + return nil, fmt.Errorf("error updating VDC group default DFW policies: %s", err) + } + + adminOrg := vdcGroup.parent.fullObject().(*AdminOrg) + return adminOrg.GetVdcGroupById(vdcGroup.VdcGroup.Id) +} + +// ActivateDfw activates distributed firewall +func (vdcGroup *VdcGroup) ActivateDfw() (*VdcGroup, error) { + return vdcGroup.UpdateDfwPolicies(types.DfwPolicies{ + Enabled: true, + }) +} + +// DeactivateDfw deactivates distributed firewall +func (vdcGroup *VdcGroup) DeactivateDfw() (*VdcGroup, error) { + return vdcGroup.UpdateDfwPolicies(types.DfwPolicies{ + Enabled: false, + }) +} + +// GetDfwPolicies retrieves all distributed firewall policies +func (vdcGroup *VdcGroup) GetDfwPolicies() (*types.DfwPolicies, error) { + tenantContext, err := vdcGroup.getTenantContext() + if err != nil { + return nil, err + } + + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcGroupsDfwPolicies + minimumApiVersion, err := vdcGroup.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := vdcGroup.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, vdcGroup.VdcGroup.Id)) + if err != nil { + return nil, err + } + + response := types.DfwPolicies{} + err = vdcGroup.client.OpenApiGetItem(minimumApiVersion, urlRef, nil, &response, getTenantContextHeader(tenantContext)) + if err != nil { + return nil, err + } + + return &response, nil +} + +// EnableDefaultPolicy activates default dfw policy +func (vdcGroup *VdcGroup) EnableDefaultPolicy() (*VdcGroup, error) { + dfwPolicies, err := vdcGroup.GetDfwPolicies() + if err != nil { + return nil, err + } + + if dfwPolicies.DefaultPolicy == nil { + return nil, fmt.Errorf("DFW has to be enabled before changing Default policy") + } + dfwPolicies.DefaultPolicy.Enabled = takeBoolPointer(true) + return vdcGroup.UpdateDefaultDfwPolicies(*dfwPolicies.DefaultPolicy) +} + +// DisableDefaultPolicy deactivates default dfw policy +func (vdcGroup *VdcGroup) DisableDefaultPolicy() (*VdcGroup, error) { + dfwPolicies, err := vdcGroup.GetDfwPolicies() + if err != nil { + return nil, err + } + + if dfwPolicies.DefaultPolicy == nil { + return nil, fmt.Errorf("DFW has to be enabled before changing Default policy") + } + dfwPolicies.DefaultPolicy.Enabled = takeBoolPointer(false) + return vdcGroup.UpdateDefaultDfwPolicies(*dfwPolicies.DefaultPolicy) +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vdccomputepolicy.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vdccomputepolicy.go new file mode 100644 index 000000000..a7816a30c --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vdccomputepolicy.go @@ -0,0 +1,253 @@ +package govcd + +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +import ( + "fmt" + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" + "net/http" + "net/url" +) + +// In UI called VM sizing policy. 
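A compact sketch of enabling the distributed firewall and its default policy on an NSX-T backed VDC group using the DFW functions above (placeholder names, assuming the group was retrieved or created beforehand):

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// enableFirewall turns on the distributed firewall and its default policy
// for an NSX-T backed VDC group.
func enableFirewall(vdcGroup *govcd.VdcGroup) error {
	updated, err := vdcGroup.ActivateDfw()
	if err != nil {
		return fmt.Errorf("error activating distributed firewall: %s", err)
	}
	if _, err := updated.EnableDefaultPolicy(); err != nil {
		return fmt.Errorf("error enabling default policy: %s", err)
	}
	fmt.Printf("distributed firewall enabled for group %s\n", updated.VdcGroup.Name)
	return nil
}
```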
In API VDC compute policy +type VdcComputePolicy struct { + VdcComputePolicy *types.VdcComputePolicy + Href string + client *Client +} + +// GetVdcComputePolicyById retrieves VDC compute policy by given ID +func (org *AdminOrg) GetVdcComputePolicyById(id string) (*VdcComputePolicy, error) { + return getVdcComputePolicyById(org.client, id) +} + +// GetVdcComputePolicyById retrieves VDC compute policy by given ID +func (org *Org) GetVdcComputePolicyById(id string) (*VdcComputePolicy, error) { + return getVdcComputePolicyById(org.client, id) +} + +// getVdcComputePolicyById retrieves VDC compute policy by given ID +func getVdcComputePolicyById(client *Client, id string) (*VdcComputePolicy, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcComputePolicies + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if id == "" { + return nil, fmt.Errorf("empty VDC id") + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint, id) + + if err != nil { + return nil, err + } + + vdcComputePolicy := &VdcComputePolicy{ + VdcComputePolicy: &types.VdcComputePolicy{}, + Href: urlRef.String(), + client: client, + } + + err = client.OpenApiGetItem(minimumApiVersion, urlRef, nil, vdcComputePolicy.VdcComputePolicy, nil) + if err != nil { + return nil, err + } + + return vdcComputePolicy, nil +} + +// GetAllVdcComputePolicies retrieves all VDC compute policies using OpenAPI endpoint. Query parameters can be supplied to perform additional +// filtering +func (org *AdminOrg) GetAllVdcComputePolicies(queryParameters url.Values) ([]*VdcComputePolicy, error) { + return getAllVdcComputePolicies(org.client, queryParameters) +} + +// GetAllVdcComputePolicies retrieves all VDC compute policies using OpenAPI endpoint. Query parameters can be supplied to perform additional +// filtering +func (org *Org) GetAllVdcComputePolicies(queryParameters url.Values) ([]*VdcComputePolicy, error) { + return getAllVdcComputePolicies(org.client, queryParameters) +} + +// getAllVdcComputePolicies retrieves all VDC compute policies using OpenAPI endpoint. 
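A small sketch of listing the compute (VM sizing) policies visible to an organization with the getters above, assuming an authenticated `*govcd.Org` (placeholder names):

```go
package example

import (
	"fmt"

	"github.com/vmware/go-vcloud-director/v2/govcd"
)

// listComputePolicies prints the IDs of all VDC compute policies visible to the org.
func listComputePolicies(org *govcd.Org) error {
	policies, err := org.GetAllVdcComputePolicies(nil)
	if err != nil {
		return fmt.Errorf("error retrieving VDC compute policies: %s", err)
	}
	fmt.Printf("found %d compute policies\n", len(policies))
	for _, policy := range policies {
		fmt.Printf(" - %s\n", policy.VdcComputePolicy.ID)
	}
	return nil
}
```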
Query parameters can be supplied to perform additional +// filtering +func getAllVdcComputePolicies(client *Client, queryParameters url.Values) ([]*VdcComputePolicy, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcComputePolicies + minimumApiVersion, err := client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + responses := []*types.VdcComputePolicy{{}} + + err = client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &responses, nil) + if err != nil { + return nil, err + } + + var wrappedVdcComputePolicies []*VdcComputePolicy + for _, response := range responses { + wrappedVdcComputePolicy := &VdcComputePolicy{ + client: client, + VdcComputePolicy: response, + } + wrappedVdcComputePolicies = append(wrappedVdcComputePolicies, wrappedVdcComputePolicy) + } + + return wrappedVdcComputePolicies, nil +} + +// CreateVdcComputePolicy creates a new VDC Compute Policy using OpenAPI endpoint +func (org *AdminOrg) CreateVdcComputePolicy(newVdcComputePolicy *types.VdcComputePolicy) (*VdcComputePolicy, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcComputePolicies + minimumApiVersion, err := org.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := org.client.OpenApiBuildEndpoint(endpoint) + if err != nil { + return nil, err + } + + returnVdcComputePolicy := &VdcComputePolicy{ + VdcComputePolicy: &types.VdcComputePolicy{}, + client: org.client, + } + + err = org.client.OpenApiPostItem(minimumApiVersion, urlRef, nil, newVdcComputePolicy, returnVdcComputePolicy.VdcComputePolicy, nil) + if err != nil { + return nil, fmt.Errorf("error creating VDC compute policy: %s", err) + } + + return returnVdcComputePolicy, nil +} + +// Update existing VDC compute policy +func (vdcComputePolicy *VdcComputePolicy) Update() (*VdcComputePolicy, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcComputePolicies + minimumApiVersion, err := vdcComputePolicy.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + if vdcComputePolicy.VdcComputePolicy.ID == "" { + return nil, fmt.Errorf("cannot update VDC compute policy without ID") + } + + urlRef, err := vdcComputePolicy.client.OpenApiBuildEndpoint(endpoint, vdcComputePolicy.VdcComputePolicy.ID) + if err != nil { + return nil, err + } + + returnVdcComputePolicy := &VdcComputePolicy{ + VdcComputePolicy: &types.VdcComputePolicy{}, + client: vdcComputePolicy.client, + } + + err = vdcComputePolicy.client.OpenApiPutItem(minimumApiVersion, urlRef, nil, vdcComputePolicy.VdcComputePolicy, returnVdcComputePolicy.VdcComputePolicy, nil) + if err != nil { + return nil, fmt.Errorf("error updating VDC compute policy: %s", err) + } + + return returnVdcComputePolicy, nil +} + +// Delete deletes VDC compute policy +func (vdcComputePolicy *VdcComputePolicy) Delete() error { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcComputePolicies + minimumApiVersion, err := vdcComputePolicy.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return err + } + + if vdcComputePolicy.VdcComputePolicy.ID == "" { + return fmt.Errorf("cannot delete VDC compute policy without id") + } + + urlRef, err := vdcComputePolicy.client.OpenApiBuildEndpoint(endpoint, vdcComputePolicy.VdcComputePolicy.ID) + if err != nil { + return err + } + + err = 
vdcComputePolicy.client.OpenApiDeleteItem(minimumApiVersion, urlRef, nil, nil) + + if err != nil { + return fmt.Errorf("error deleting VDC compute policy: %s", err) + } + + return nil +} + +// GetAllAssignedVdcComputePolicies retrieves all VDC assigned compute policies using OpenAPI endpoint. Query parameters can be supplied to perform additional +// filtering +func (vdc *AdminVdc) GetAllAssignedVdcComputePolicies(queryParameters url.Values) ([]*VdcComputePolicy, error) { + endpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointVdcAssignedComputePolicies + minimumApiVersion, err := vdc.client.checkOpenApiEndpointCompatibility(endpoint) + if err != nil { + return nil, err + } + + urlRef, err := vdc.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, vdc.AdminVdc.ID)) + if err != nil { + return nil, err + } + + responses := []*types.VdcComputePolicy{{}} + + err = vdc.client.OpenApiGetAllItems(minimumApiVersion, urlRef, queryParameters, &responses, nil) + if err != nil { + return nil, err + } + + var wrappedVdcComputePolicies []*VdcComputePolicy + for _, response := range responses { + wrappedVdcComputePolicy := &VdcComputePolicy{ + client: vdc.client, + VdcComputePolicy: response, + } + wrappedVdcComputePolicies = append(wrappedVdcComputePolicies, wrappedVdcComputePolicy) + } + + return wrappedVdcComputePolicies, nil +} + +// SetAssignedComputePolicies assign(set) compute policies. +func (vdc *AdminVdc) SetAssignedComputePolicies(computePolicyReferences types.VdcComputePolicyReferences) (*types.VdcComputePolicyReferences, error) { + util.Logger.Printf("[TRACE] Set Compute Policies started") + + if !vdc.client.IsSysAdmin { + return nil, fmt.Errorf("functionality requires System Administrator privileges") + } + + adminVdcPolicyHREF, err := url.ParseRequestURI(vdc.AdminVdc.HREF) + if err != nil { + return nil, fmt.Errorf("error parsing VDC URL: %s", err) + } + + vdcId, err := GetUuidFromHref(vdc.AdminVdc.HREF, true) + if err != nil { + return nil, fmt.Errorf("unable to get vdc ID from HREF: %s", err) + } + adminVdcPolicyHREF.Path = "/api/admin/vdc/" + vdcId + "/computePolicies" + + returnedVdcComputePolicies := &types.VdcComputePolicyReferences{} + computePolicyReferences.Xmlns = types.XMLNamespaceVCloud + + _, err = vdc.client.ExecuteRequest(adminVdcPolicyHREF.String(), http.MethodPut, + types.MimeVdcComputePolicyReferences, "error setting compute policies for VDC: %s", computePolicyReferences, returnedVdcComputePolicies) + if err != nil { + return nil, err + } + + return returnedVdcComputePolicies, nil +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vm.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vm.go new file mode 100644 index 000000000..06dd5a48c --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/govcd/vm.go @@ -0,0 +1,1815 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
+ */ + +package govcd + +import ( + "fmt" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" +) + +type VM struct { + VM *types.Vm + client *Client +} + +type VMRecord struct { + VM *types.QueryResultVMRecordType + client *Client +} + +func NewVM(cli *Client) *VM { + return &VM{ + VM: new(types.Vm), + client: cli, + } +} + +// NewVMRecord creates an instance with reference to types.QueryResultVMRecordType +func NewVMRecord(cli *Client) *VMRecord { + return &VMRecord{ + VM: new(types.QueryResultVMRecordType), + client: cli, + } +} + +func (vm *VM) GetStatus() (string, error) { + err := vm.Refresh() + if err != nil { + return "", fmt.Errorf("error refreshing VM: %s", err) + } + return types.VAppStatuses[vm.VM.Status], nil +} + +// IsDeployed checks if the VM is deployed or not +func (vm *VM) IsDeployed() (bool, error) { + err := vm.Refresh() + if err != nil { + return false, fmt.Errorf("error refreshing VM: %s", err) + } + return vm.VM.Deployed, nil +} + +func (vm *VM) Refresh() error { + + if vm.VM.HREF == "" { + return fmt.Errorf("cannot refresh VM, Object is empty") + } + + refreshUrl := vm.VM.HREF + + // Empty struct before a new unmarshal, otherwise we end up with duplicate + // elements in slices. + vm.VM = &types.Vm{} + + _, err := vm.client.ExecuteRequest(refreshUrl, http.MethodGet, "", "error refreshing VM: %s", nil, vm.VM) + + // The request was successful + return err +} + +// GetVirtualHardwareSection returns the virtual hardware items attached to a VM +func (vm *VM) GetVirtualHardwareSection() (*types.VirtualHardwareSection, error) { + + virtualHardwareSection := &types.VirtualHardwareSection{} + + if vm.VM.HREF == "" { + return nil, fmt.Errorf("cannot refresh, invalid reference url") + } + + _, err := vm.client.ExecuteRequest(vm.VM.HREF+"/virtualHardwareSection/", http.MethodGet, + types.MimeVirtualHardwareSection, "error retrieving virtual hardware: %s", nil, virtualHardwareSection) + + // The request was successful + return virtualHardwareSection, err +} + +// GetNetworkConnectionSection returns current networks attached to VM +// +// The slice of NICs is not necessarily ordered by NIC index +func (vm *VM) GetNetworkConnectionSection() (*types.NetworkConnectionSection, error) { + + networkConnectionSection := &types.NetworkConnectionSection{} + + if vm.VM.HREF == "" { + return networkConnectionSection, fmt.Errorf("cannot retrieve network when VM HREF is unset") + } + + _, err := vm.client.ExecuteRequest(vm.VM.HREF+"/networkConnectionSection/", http.MethodGet, + types.MimeNetworkConnectionSection, "error retrieving network connection: %s", nil, networkConnectionSection) + + // The request was successful + return networkConnectionSection, err +} + +// UpdateNetworkConnectionSection applies network configuration of types.NetworkConnectionSection for the VM +// Runs synchronously, VM is ready for another operation after this function returns. 
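+//
+// A hypothetical usage sketch (the *VM value and the primary index are illustrative, not
+// taken from this library's own examples): read the current section, adjust it, apply it.
+//
+//	section, err := vm.GetNetworkConnectionSection()
+//	if err != nil {
+//		return err
+//	}
+//	section.PrimaryNetworkConnectionIndex = 0
+//	if err := vm.UpdateNetworkConnectionSection(section); err != nil {
+//		return err
+//	}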
+func (vm *VM) UpdateNetworkConnectionSection(networks *types.NetworkConnectionSection) error { + if vm.VM.HREF == "" { + return fmt.Errorf("cannot update network connection when VM HREF is unset") + } + + // Retrieve current network configuration so that we are not altering any other internal fields + updateNetwork, err := vm.GetNetworkConnectionSection() + if err != nil { + return fmt.Errorf("cannot read network section for update: %s", err) + } + updateNetwork.PrimaryNetworkConnectionIndex = networks.PrimaryNetworkConnectionIndex + updateNetwork.NetworkConnection = networks.NetworkConnection + updateNetwork.Ovf = types.XMLNamespaceOVF + + task, err := vm.client.ExecuteTaskRequest(vm.VM.HREF+"/networkConnectionSection/", http.MethodPut, + types.MimeNetworkConnectionSection, "error updating network connection: %s", updateNetwork) + if err != nil { + return err + } + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error waiting for task completion after network update for vm %s: %s", vm.VM.Name, err) + } + + return nil +} + +// Deprecated: use client.GetVMByHref instead +func (client *Client) FindVMByHREF(vmHREF string) (VM, error) { + + newVm := NewVM(client) + + _, err := client.ExecuteRequest(vmHREF, http.MethodGet, + "", "error retrieving VM: %s", nil, newVm.VM) + + return *newVm, err + +} + +func (vm *VM) PowerOn() (Task, error) { + + apiEndpoint := urlParseRequestURI(vm.VM.HREF) + apiEndpoint.Path += "/power/action/powerOn" + + // Return the task + return vm.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "", "error powering on VM: %s", nil) + +} + +// PowerOnAndForceCustomization is a synchronous function which is equivalent to the functionality +// one has in UI. It triggers customization which may be useful in some cases (like altering NICs) +// +// The VM _must_ be un-deployed for this action to actually work. +func (vm *VM) PowerOnAndForceCustomization() error { + // PowerOnAndForceCustomization only works if the VM was previously un-deployed + vmIsDeployed, err := vm.IsDeployed() + if err != nil { + return fmt.Errorf("unable to check if VM %s is un-deployed forcing customization: %s", + vm.VM.Name, err) + } + + if vmIsDeployed { + return fmt.Errorf("VM %s must be undeployed before forcing customization", vm.VM.Name) + } + + apiEndpoint := urlParseRequestURI(vm.VM.HREF) + apiEndpoint.Path += "/action/deploy" + + powerOnAndCustomize := &types.DeployVAppParams{ + Xmlns: types.XMLNamespaceVCloud, + PowerOn: true, + ForceCustomization: true, + } + + task, err := vm.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "", "error powering on VM with customization: %s", powerOnAndCustomize) + + if err != nil { + return err + } + + err = task.WaitTaskCompletion() + if err != nil { + return fmt.Errorf("error waiting for task completion after power on with customization %s: %s", vm.VM.Name, err) + } + + return nil +} + +func (vm *VM) PowerOff() (Task, error) { + + apiEndpoint := urlParseRequestURI(vm.VM.HREF) + apiEndpoint.Path += "/power/action/powerOff" + + // Return the task + return vm.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPost, + "", "error powering off VM: %s", nil) +} + +// ChangeCPUCount sets number of available virtual logical processors +// (i.e. CPUs x cores per socket) +// Cpu cores count is inherited from template. 
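+//
+// Illustrative sketch only (assumes vm is an already retrieved *VM and 4 is an arbitrary
+// CPU count): the method returns a Task, which callers normally wait on.
+//
+//	task, err := vm.ChangeCPUCount(4)
+//	if err != nil {
+//		return err
+//	}
+//	if err := task.WaitTaskCompletion(); err != nil {
+//		return err
+//	}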
+// https://communities.vmware.com/thread/576209 +func (vm *VM) ChangeCPUCount(virtualCpuCount int) (Task, error) { + return vm.ChangeCPUCountWithCore(virtualCpuCount, nil) +} + +// ChangeCPUCountWithCore sets number of available virtual logical processors +// (i.e. CPUs x cores per socket) and cores per socket. +// Socket count is a result of: virtual logical processors/cores per socket +// https://communities.vmware.com/thread/576209 +func (vm *VM) ChangeCPUCountWithCore(virtualCpuCount int, coresPerSocket *int) (Task, error) { + + err := vm.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing VM before running customization: %s", err) + } + + newCpu := &types.OVFItem{ + XmlnsRasd: types.XMLNamespaceRASD, + XmlnsVCloud: types.XMLNamespaceVCloud, + XmlnsXsi: types.XMLNamespaceXSI, + XmlnsVmw: types.XMLNamespaceVMW, + VCloudHREF: vm.VM.HREF + "/virtualHardwareSection/cpu", + VCloudType: types.MimeRasdItem, + AllocationUnits: "hertz * 10^6", + Description: "Number of Virtual CPUs", + ElementName: strconv.Itoa(virtualCpuCount) + " virtual CPU(s)", + InstanceID: 4, + Reservation: 0, + ResourceType: types.ResourceTypeProcessor, + VirtualQuantity: int64(virtualCpuCount), + CoresPerSocket: coresPerSocket, + Link: &types.Link{ + HREF: vm.VM.HREF + "/virtualHardwareSection/cpu", + Rel: "edit", + Type: types.MimeRasdItem, + }, + } + + apiEndpoint := urlParseRequestURI(vm.VM.HREF) + apiEndpoint.Path += "/virtualHardwareSection/cpu" + + // Return the task + return vm.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeRasdItem, "error changing CPU count: %s", newCpu) + +} + +func (vm *VM) updateNicParameters(networks []map[string]interface{}, networkSection *types.NetworkConnectionSection) error { + for tfNicSlot, network := range networks { + for loopIndex := range networkSection.NetworkConnection { + // Change network config only if we have the same virtual slot number as in .tf config + if tfNicSlot == networkSection.NetworkConnection[loopIndex].NetworkConnectionIndex { + + // Determine what type of address is requested for the vApp + var ipAllocationMode string + ipAddress := "Any" + + var ipFieldString string + ipField, ipIsSet := network["ip"] + if ipIsSet { + ipFieldString = ipField.(string) + } + + switch { + // TODO v3.0 remove from here when deprecated `ip` and `network_name` attributes are removed + case ipIsSet && ipFieldString == "dhcp": // Deprecated ip="dhcp" mode + ipAllocationMode = types.IPAllocationModeDHCP + case ipIsSet && ipFieldString == "allocated": // Deprecated ip="allocated" mode + ipAllocationMode = types.IPAllocationModePool + case ipIsSet && ipFieldString == "none": // Deprecated ip="none" mode + ipAllocationMode = types.IPAllocationModeNone + + // Deprecated ip="valid_ip" mode (currently it is hit by ip_allocation_mode=MANUAL as well) + case ipIsSet && net.ParseIP(ipFieldString) != nil: + ipAllocationMode = types.IPAllocationModeManual + ipAddress = ipFieldString + case ipIsSet && ipFieldString != "": // Deprecated ip="something_invalid" we default to DHCP. This is odd but backwards compatible. 
+ ipAllocationMode = types.IPAllocationModeDHCP + // TODO v3.0 remove until here when deprecated `ip` and `network_name` attributes are removed + + // Removed for Coverity warning: dead code - We can reinstate after removing above code + //case ipIsSet && net.ParseIP(ipFieldString) != nil && (network["ip_allocation_mode"].(string) == types.IPAllocationModeManual): + // ipAllocationMode = types.IPAllocationModeManual + // ipAddress = ipFieldString + default: // New networks functionality. IP was not set and we're defaulting to provided ip_allocation_mode (only manual requires the IP) + ipAllocationMode = network["ip_allocation_mode"].(string) + } + + networkSection.NetworkConnection[loopIndex].NeedsCustomization = true + networkSection.NetworkConnection[loopIndex].IsConnected = true + networkSection.NetworkConnection[loopIndex].IPAddress = ipAddress + networkSection.NetworkConnection[loopIndex].IPAddressAllocationMode = ipAllocationMode + + // for IPAllocationModeNone we hardcode special network name used by vcd 'none' + if ipAllocationMode == types.IPAllocationModeNone { + networkSection.NetworkConnection[loopIndex].Network = types.NoneNetwork + } else { + if _, ok := network["network_name"]; !ok { + return fmt.Errorf("could not identify network name") + } + networkSection.NetworkConnection[loopIndex].Network = network["network_name"].(string) + } + + // If we have one NIC only then it is primary by default, otherwise we check for "is_primary" key + if (len(networks) == 1) || (network["is_primary"] != nil && network["is_primary"].(bool)) { + networkSection.PrimaryNetworkConnectionIndex = tfNicSlot + } + } + } + } + return nil +} + +// ChangeNetworkConfig allows to update existing VM NIC configuration.f +func (vm *VM) ChangeNetworkConfig(networks []map[string]interface{}) (Task, error) { + err := vm.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing VM before running customization: %s", err) + } + + networkSection, err := vm.GetNetworkConnectionSection() + if err != nil { + return Task{}, fmt.Errorf("could not retrieve network connection for VM: %s", err) + } + + err = vm.updateNicParameters(networks, networkSection) + if err != nil { + return Task{}, fmt.Errorf("failed processing NIC parameters: %s", err) + } + + networkSection.Xmlns = types.XMLNamespaceVCloud + networkSection.Ovf = types.XMLNamespaceOVF + networkSection.Info = "Specifies the available VM network connections" + + apiEndpoint := urlParseRequestURI(vm.VM.HREF) + apiEndpoint.Path += "/networkConnectionSection/" + + // Return the task + return vm.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeNetworkConnectionSection, "error changing network config: %s", networkSection) +} + +func (vm *VM) ChangeMemorySize(size int) (Task, error) { + + err := vm.Refresh() + if err != nil { + return Task{}, fmt.Errorf("error refreshing VM before running customization: %s", err) + } + + newMem := &types.OVFItem{ + XmlnsRasd: types.XMLNamespaceRASD, + XmlnsVCloud: types.XMLNamespaceVCloud, + XmlnsXsi: types.XMLNamespaceXSI, + VCloudHREF: vm.VM.HREF + "/virtualHardwareSection/memory", + VCloudType: types.MimeRasdItem, + AllocationUnits: "byte * 2^20", + Description: "Memory SizeMb", + ElementName: strconv.Itoa(size) + " MB of memory", + InstanceID: 5, + Reservation: 0, + ResourceType: types.ResourceTypeMemory, + VirtualQuantity: int64(size), + Weight: 0, + Link: &types.Link{ + HREF: vm.VM.HREF + "/virtualHardwareSection/memory", + Rel: "edit", + Type: types.MimeRasdItem, + }, + } + + apiEndpoint 
:= urlParseRequestURI(vm.VM.HREF) + apiEndpoint.Path += "/virtualHardwareSection/memory" + + // Return the task + return vm.client.ExecuteTaskRequest(apiEndpoint.String(), http.MethodPut, + types.MimeRasdItem, "error changing memory size: %s", newMem) +} + +func (vm *VM) RunCustomizationScript(computerName, script string) (Task, error) { + return vm.Customize(computerName, script, false) +} + +// GetGuestCustomizationStatus retrieves guest customization status. +// It can be one of "GC_PENDING", "REBOOT_PENDING", "GC_FAILED", "POST_GC_PENDING", "GC_COMPLETE" +func (vm *VM) GetGuestCustomizationStatus() (string, error) { + guestCustomizationStatus := &types.GuestCustomizationStatusSection{} + + if vm.VM.HREF == "" { + return "", fmt.Errorf("cannot retrieve guest customization, VM HREF is empty") + } + + _, err := vm.client.ExecuteRequest(vm.VM.HREF+"/guestcustomizationstatus", http.MethodGet, + types.MimeGuestCustomizationStatus, "error retrieving guest customization status: %s", nil, guestCustomizationStatus) + + // The request was successful + return guestCustomizationStatus.GuestCustStatus, err +} + +// BlockWhileGuestCustomizationStatus blocks until the customization status of VM exits unwantedStatus. +// It sleeps 3 seconds between iterations and times out after timeOutAfterSeconds of seconds. +// +// timeOutAfterSeconds must be more than 4 and less than 2 hours (60s*120) +func (vm *VM) BlockWhileGuestCustomizationStatus(unwantedStatus string, timeOutAfterSeconds int) error { + if timeOutAfterSeconds < 5 || timeOutAfterSeconds > 60*120 { + return fmt.Errorf("timeOutAfterSeconds must be in range 4 end +// 4. if error, validation with VM checks +// 4a. if validation error, it was a VM issue: return combined original error + validation error +// 4b. if no validation error, the failure was due to something else: return only original error +func validateAffinityRule(client *Client, affinityRuleDef *types.VmAffinityRule, checkVMs bool) (*types.VmAffinityRule, error) { + if affinityRuleDef == nil { + return nil, fmt.Errorf("empty definition given for a VM affinity rule") + } + if affinityRuleDef.Name == "" { + return nil, fmt.Errorf("no name given for a VM affinity rule") + } + if affinityRuleDef.Polarity == "" { + return nil, fmt.Errorf("no polarity given for a VM affinity rule") + } + if !validPolarity(affinityRuleDef.Polarity) { + return nil, fmt.Errorf("illegal polarity given (%s) for a VM affinity rule", affinityRuleDef.Polarity) + } + // Ensure the VMs in the list are different + var seenVms = make(map[string]bool) + var allVmMap = make(map[string]bool) + if checkVMs { + vmList, err := client.QueryVmList(types.VmQueryFilterOnlyDeployed) + if err != nil { + return nil, fmt.Errorf("error getting VM list : %s", err) + } + for _, vm := range vmList { + allVmMap[extractUuid(vm.HREF)] = true + } + } + for _, vmr := range affinityRuleDef.VmReferences { + if len(vmr.VMReference) == 0 { + continue + } + for _, vm := range vmr.VMReference { + if vm == nil { + continue + } + // The only mandatory field is the HREF + if vm.HREF == "" { + return nil, fmt.Errorf("empty VM HREF provided in VM list") + } + _, seen := seenVms[vm.HREF] + if seen { + return nil, fmt.Errorf("VM HREF %s used more than once", vm.HREF) + } + seenVms[vm.HREF] = true + + if checkVMs { + // Checking that the VMs indicated exist. 
+ // Without this check, if any of the VMs do not exist, we would get an ugly error that doesn't easily explain + // the nature of the problem, such as + // > "error instantiating a new VM affinity rule: API Error: 403: [ ... ] + // > Either you need some or all of the following rights [ORG_VDC_VM_VM_AFFINITY_EDIT] + // > to perform operations [VAPP_VM_EDIT_AFFINITY_RULE] for $OP_ID or the target entity is invalid" + + _, vmInList := allVmMap[extractUuid(vm.HREF)] + if !vmInList { + return nil, fmt.Errorf("VM identified by '%s' not found ", vm.HREF) + } + } + } + } + if len(seenVms) < 2 { + return nil, fmt.Errorf("at least 2 VMs should be given for a VM Affinity Rule") + } + return affinityRuleDef, nil +} + +// CreateVmAffinityRuleAsync creates a new VM affinity rule, and returns a task that handles the operation +func (vdc *Vdc) CreateVmAffinityRuleAsync(affinityRuleDef *types.VmAffinityRule) (Task, error) { + + var err error + // We validate the input, without a strict check on the VMs + affinityRuleDef, err = validateAffinityRule(vdc.client, affinityRuleDef, false) + if err != nil { + return Task{}, fmt.Errorf("[CreateVmAffinityRuleAsync] %s", err) + } + + affinityRuleDef.Xmlns = types.XMLNamespaceVCloud + + href := vdc.getLinkHref("add", "application/vnd.vmware.vcloud.vmaffinityrule+xml") + if href == "" { + return Task{}, fmt.Errorf("no link with VM affinity rule found in VDC %s", vdc.Vdc.Name) + } + + task, err := vdc.client.ExecuteTaskRequest(href, http.MethodPost, + "application/vnd.vmware.vcloud.vmaffinityrule+xml", "error instantiating a new VM affinity rule: %s", affinityRuleDef) + if err != nil { + // if we get any error, we repeat the validation + // with a strict check on VM existence. + _, validationErr := validateAffinityRule(vdc.client, affinityRuleDef, true) + if validationErr != nil { + // If we get any error from the validation now, it should be an invalid VM, + // so we combine the original error with the validation error + return Task{}, fmt.Errorf("%s - %s", err, validationErr) + } + // If the validation error is nil, we return just the original error + return Task{}, err + } + return task, err +} + +// CreateVmAffinityRule is a wrap around CreateVmAffinityRuleAsync that handles the task and returns the finished object +func (vdc *Vdc) CreateVmAffinityRule(affinityRuleDef *types.VmAffinityRule) (*VmAffinityRule, error) { + + task, err := vdc.CreateVmAffinityRuleAsync(affinityRuleDef) + if err != nil { + return nil, err + } + // The rule ID is the ID of the task owner (see Task definition in types.go) + ruleId := task.Task.Owner.ID + + err = task.WaitTaskCompletion() + if err != nil { + return nil, err + } + + // Retrieving the newly created rule using the ID from the task + vmAffinityRule, err := vdc.GetVmAffinityRuleById(ruleId) + if err != nil { + return nil, fmt.Errorf("error retrieving VmAffinityRule %s using ID %s: %s", affinityRuleDef.Name, ruleId, err) + } + return vmAffinityRule, nil +} + +// Delete removes a VM affinity rule from vCD +func (vmar *VmAffinityRule) Delete() error { + + if vmar == nil || vmar.VmAffinityRule == nil { + return fmt.Errorf("nil VM Affinity Rule passed for deletion") + } + + if vmar.VmAffinityRule.HREF == "" { + return fmt.Errorf("VM Affinity Rule passed for deletion has no HREF") + } + + deleteHref := vmar.VmAffinityRule.HREF + linkHref := vmar.getLinkHref("remove") + if linkHref != "" { + deleteHref = linkHref + } + + deleteTask, err := vmar.client.ExecuteTaskRequest(deleteHref, http.MethodDelete, + "", "error removing VM Affinity 
Rule : %s", nil) + if err != nil { + return err + } + return deleteTask.WaitTaskCompletion() +} + +// getLinkHref returns an HREF for a given value of Rel +func (vmar *VmAffinityRule) getLinkHref(rel string) string { + if vmar.VmAffinityRule.Link != nil { + for _, link := range vmar.VmAffinityRule.Link { + if link.Rel == rel { + return link.HREF + } + } + } + return "" +} + +// Update modifies a VM affinity rule using as input +// the entity's internal data. +func (vmar *VmAffinityRule) Update() error { + var err error + var affinityRuleDef *types.VmAffinityRule + + if vmar == nil || vmar.VmAffinityRule == nil { + return fmt.Errorf("nil VM Affinity Rule passed for update") + } + if vmar.VmAffinityRule.HREF == "" { + return fmt.Errorf("VM Affinity Rule passed for update has no HREF") + } + + // We validate the input, without a strict check on the VMs + affinityRuleDef, err = validateAffinityRule(vmar.client, vmar.VmAffinityRule, false) + if err != nil { + return fmt.Errorf("[Update] %s", err) + } + vmar.VmAffinityRule = affinityRuleDef + + updateRef := vmar.VmAffinityRule.HREF + linkHref := vmar.getLinkHref("edit") + if linkHref != "" { + updateRef = linkHref + } + + vmar.VmAffinityRule.Link = nil + vmar.VmAffinityRule.VCloudExtension = nil + updateTask, err := vmar.client.ExecuteTaskRequest(updateRef, http.MethodPut, + "", "error updating VM Affinity Rule : %s", vmar.VmAffinityRule) + if err != nil { + // if we get any error, we repeat the validation + // with a strict check on VM existence. + _, validationErr := validateAffinityRule(vmar.client, affinityRuleDef, true) + // If we get any error from the validation now, it should be an invalid VM, + // so we combine the original error with the validation error + if validationErr != nil { + return fmt.Errorf("%s - %s", err, validationErr) + } + // If the validation error is nil, we return just the original error + return err + } + err = updateTask.WaitTaskCompletion() + if err != nil { + return err + } + return vmar.Refresh() +} + +// Refresh gets a fresh copy of the VM affinity rule from vCD +func (vmar *VmAffinityRule) Refresh() error { + var newVmAffinityRule types.VmAffinityRule + _, err := vmar.client.ExecuteRequest(vmar.VmAffinityRule.HREF, http.MethodGet, + "", "error retrieving affinity rule: %v", nil, &newVmAffinityRule) + if err != nil { + return err + } + vmar.VmAffinityRule = &newVmAffinityRule + return nil +} + +// SetEnabled is a shortcut to update only the IsEnabled property of a VM affinity rule +func (vmar *VmAffinityRule) SetEnabled(value bool) error { + if vmar.VmAffinityRule.IsEnabled != nil { + currentValue := *vmar.VmAffinityRule.IsEnabled + if currentValue == value { + return nil + } + } + vmar.VmAffinityRule.IsEnabled = takeBoolPointer(value) + return vmar.Update() +} + +// SetMandatory is a shortcut to update only the IsMandatory property of a VM affinity rule +func (vmar *VmAffinityRule) SetMandatory(value bool) error { + if vmar.VmAffinityRule.IsMandatory != nil { + currentValue := *vmar.VmAffinityRule.IsMandatory + if currentValue == value { + return nil + } + } + vmar.VmAffinityRule.IsMandatory = takeBoolPointer(value) + return vmar.Update() +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/constants.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/constants.go new file mode 100644 index 000000000..48ea3ec0f --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/constants.go 
@@ -0,0 +1,480 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package types + +const ( + // PublicCatalog Name + PublicCatalog = "Public Catalog" + + // DefaultCatalog Name + DefaultCatalog = "Default Catalog" + + // JSONMimeV57 the json mime for version 5.7 of the API + JSONMimeV57 = "application/json;version=5.7" + // AnyXMLMime511 the wildcard xml mime for version 5.11 of the API + AnyXMLMime511 = "application/*+xml;version=5.11" + AnyXMLMime = "application/xml" + // Version511 the 5.11 version + Version511 = "5.11" + // Version is the default version number + Version = Version511 + // SoapXML mime type + SoapXML = "application/soap+xml" + // JSONMime + JSONMime = "application/json" +) + +const ( + // MimeOrgList mime for org list + MimeOrgList = "application/vnd.vmware.vcloud.orgList+xml" + // MimeOrg mime for org + MimeOrg = "application/vnd.vmware.vcloud.org+xml" + // MimeAdminOrg mime for admin org + MimeAdminOrg = "application/vnd.vmware.admin.organization+xml" + // MimeCatalog mime for catalog + MimeCatalog = "application/vnd.vmware.vcloud.catalog+xml" + // MimeCatalogItem mime for catalog item + MimeCatalogItem = "application/vnd.vmware.vcloud.catalogItem+xml" + // MimeVDC mime for a VDC + MimeVDC = "application/vnd.vmware.vcloud.vdc+xml" + // MimeVDC mime for a admin VDC + MimeAdminVDC = "application/vnd.vmware.admin.vdc+xml" + // MimeEdgeGateway mime for an Edge Gateway + MimeEdgeGateway = "application/vnd.vmware.admin.edgeGateway+xml" + // MimeVAppTemplate mime for a vapp template + MimeVAppTemplate = "application/vnd.vmware.vcloud.vAppTemplate+xml" + // MimeVApp mime for a vApp + MimeVApp = "application/vnd.vmware.vcloud.vApp+xml" + // MimeQueryRecords mime for the query records + MimeQueryRecords = "application/vnd.vmware.vcloud.query.records+xml" + // MimeAPIExtensibility mime for api extensibility + MimeAPIExtensibility = "application/vnd.vmware.vcloud.apiextensibility+xml" + // MimeEntity mime for vcloud entity + MimeEntity = "application/vnd.vmware.vcloud.entity+xml" + // MimeQueryList mime for query list + MimeQueryList = "application/vnd.vmware.vcloud.query.queryList+xml" + // MimeSession mime for a session + MimeSession = "application/vnd.vmware.vcloud.session+xml" + // MimeTask mime for task + MimeTask = "application/vnd.vmware.vcloud.task+xml" + // MimeError mime for error + MimeError = "application/vnd.vmware.vcloud.error+xml" + // MimeNetwork mime for a network + MimeNetwork = "application/vnd.vmware.vcloud.network+xml" + // MimeOrgVdcNetwork mime for an Org VDC network + MimeOrgVdcNetwork = "application/vnd.vmware.vcloud.orgVdcNetwork+xml" + //MimeDiskCreateParams mime for create independent disk + MimeDiskCreateParams = "application/vnd.vmware.vcloud.diskCreateParams+xml" + // Mime for VMs + MimeVMs = "application/vnd.vmware.vcloud.vms+xml" + // Mime for attach or detach independent disk + MimeDiskAttachOrDetachParams = "application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml" + // Mime for Disk + MimeDisk = "application/vnd.vmware.vcloud.disk+xml" + // Mime for insert or eject media + MimeMediaInsertOrEjectParams = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml" + // Mime for catalog + MimeAdminCatalog = "application/vnd.vmware.admin.catalog+xml" + // Mime for virtual hardware section + MimeVirtualHardwareSection = "application/vnd.vmware.vcloud.virtualHardwareSection+xml" + // Mime for networkConnectionSection + MimeNetworkConnectionSection = 
"application/vnd.vmware.vcloud.networkConnectionSection+xml" + // Mime for Item + MimeRasdItem = "application/vnd.vmware.vcloud.rasdItem+xml" + // Mime for guest customization section + MimeGuestCustomizationSection = "application/vnd.vmware.vcloud.guestCustomizationSection+xml" + // Mime for guest customization status + MimeGuestCustomizationStatus = "application/vnd.vmware.vcloud.guestcustomizationstatussection" + // Mime for network config section + MimeNetworkConfigSection = "application/vnd.vmware.vcloud.networkconfigsection+xml" + // Mime for recompose vApp params + MimeRecomposeVappParams = "application/vnd.vmware.vcloud.recomposeVAppParams+xml" + // Mime for compose vApp params + MimeComposeVappParams = "application/vnd.vmware.vcloud.composeVAppParams+xml" + // Mime for undeploy vApp params + MimeUndeployVappParams = "application/vnd.vmware.vcloud.undeployVAppParams+xml" + // Mime for deploy vApp params + MimeDeployVappParams = "application/vnd.vmware.vcloud.deployVAppParams+xml" + // Mime for VM + MimeVM = "application/vnd.vmware.vcloud.vm+xml" + // Mime for instantiate vApp template params + MimeInstantiateVappTemplateParams = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml" + // Mime for product section + MimeProductSection = "application/vnd.vmware.vcloud.productSections+xml" + // Mime for metadata + MimeMetaData = "application/vnd.vmware.vcloud.metadata+xml" + // Mime for metadata value + MimeMetaDataValue = "application/vnd.vmware.vcloud.metadata.value+xml" + // Mime for a admin network + MimeExtensionNetwork = "application/vnd.vmware.admin.extension.network+xml" + // Mime for an external network + MimeExternalNetwork = "application/vnd.vmware.admin.vmwexternalnet+xml" + // Mime of an Org User + MimeAdminUser = "application/vnd.vmware.admin.user+xml" + // MimeAdminGroup specifies groups + MimeAdminGroup = "application/vnd.vmware.admin.group+xml" + // MimeOrgLdapSettings + MimeOrgLdapSettings = "application/vnd.vmware.admin.organizationldapsettings+xml" + // Mime of vApp network + MimeVappNetwork = "application/vnd.vmware.vcloud.vAppNetwork+xml" + // Mime of access control + MimeControlAccess = "application/vnd.vmware.vcloud.controlAccess+xml" + // Mime of VM capabilities + MimeVmCapabilities = "application/vnd.vmware.vcloud.vmCapabilitiesSection+xml" + // Mime of Vdc Compute Policy References + MimeVdcComputePolicyReferences = "application/vnd.vmware.vcloud.vdcComputePolicyReferences+xml" + // Mime for Storage profile + MimeStorageProfile = "application/vnd.vmware.admin.vdcStorageProfile+xml " + // Mime for create VM Params + MimeCreateVmParams = "application/vnd.vmware.vcloud.CreateVmParams+xml" + // Mime for instantiate VM Params from template + MimeInstantiateVmTemplateParams = "application/vnd.vmware.vcloud.instantiateVmTemplateParams+xml" + // Mime for adding or removing VDC storage profiles + MimeUpdateVdcStorageProfiles = "application/vnd.vmware.admin.updateVdcStorageProfiles+xml" + // Mime to modify lease settings + MimeLeaseSettingSection = "application/vnd.vmware.vcloud.leaseSettingsSection+xml" +) + +const ( + VMsCDResourceSubType = "vmware.cdrom.iso" +) + +// https://blogs.vmware.com/vapp/2009/11/virtual-hardware-in-ovf-part-1.html + +const ( + ResourceTypeOther int = 0 + ResourceTypeProcessor int = 3 + ResourceTypeMemory int = 4 + ResourceTypeIDE int = 5 + ResourceTypeSCSI int = 6 + ResourceTypeEthernet int = 10 + ResourceTypeFloppy int = 14 + ResourceTypeCD int = 15 + ResourceTypeDVD int = 16 + ResourceTypeDisk int = 17 + ResourceTypeUSB int = 
23 +) + +const ( + FenceModeIsolated = "isolated" + FenceModeBridged = "bridged" + FenceModeNAT = "natRouted" +) + +const ( + IPAllocationModeDHCP = "DHCP" + IPAllocationModeManual = "MANUAL" + IPAllocationModeNone = "NONE" + IPAllocationModePool = "POOL" +) + +// NoneNetwork is a special type of network in vCD which represents a network card which is not +// attached to any network. +const ( + NoneNetwork = "none" +) + +const ( + XMLNamespaceVCloud = "http://www.vmware.com/vcloud/v1.5" + XMLNamespaceOVF = "http://schemas.dmtf.org/ovf/envelope/1" + XMLNamespaceVMW = "http://www.vmware.com/schema/ovf" + XMLNamespaceXSI = "http://www.w3.org/2001/XMLSchema-instance" + XMLNamespaceRASD = "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" + XMLNamespaceVSSD = "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" + XMLNamespaceExtension = "http://www.vmware.com/vcloud/extension/v1.5" +) + +// NSX-V Edge gateway API endpoints +const ( + EdgeNatPath = "/nat/config" + EdgeCreateNatPath = "/nat/config/rules" + EdgeFirewallPath = "/firewall/config" + EdgeCreateFirewallPath = "/firewall/config/rules" + EdgeVnicConfig = "/vnics" + EdgeVdcVnicConfig = "/vdcNetworks" + EdgeDhcpRelayPath = "/dhcp/config/relay" + EdgeDhcpLeasePath = "/dhcp/leaseInfo" + LbConfigPath = "/loadbalancer/config/" + LbMonitorPath = "/loadbalancer/config/monitors/" + LbServerPoolPath = "/loadbalancer/config/pools/" + LbAppProfilePath = "/loadbalancer/config/applicationprofiles/" + LbAppRulePath = "/loadbalancer/config/applicationrules/" + LbVirtualServerPath = "/loadbalancer/config/virtualservers/" +) + +// NSX-V proxied services API endpoints +const ( + NsxvIpSetServicePath = "/ipset" +) + +// Guest customization statuses. These are all known possible statuses +const ( + GuestCustStatusPending = "GC_PENDING" + GuestCustStatusPostPending = "POST_GC_PENDING" + GuestCustStatusComplete = "GC_COMPLETE" + GuestCustStatusFailed = "GC_FAILED" + GuestCustStatusRebootPending = "REBOOT_PENDING" +) + +// Edge gateway vNic types +const ( + EdgeGatewayVnicTypeUplink = "uplink" + EdgeGatewayVnicTypeInternal = "internal" + EdgeGatewayVnicTypeTrunk = "trunk" + EdgeGatewayVnicTypeSubinterface = "subinterface" + EdgeGatewayVnicTypeAny = "any" +) + +// Names of the filters allowed in the search engine +const ( + FilterNameRegex = "name_regex" // a name, searched by regular expression + FilterDate = "date" // a date expression (>|<|==|>=|<= date) + FilterIp = "ip" // An IP, searched by regular expression + FilterLatest = "latest" // gets the newest element + FilterEarliest = "earliest" // gets the oldest element + FilterParent = "parent" // matches the entity parent + FilterParentId = "parent_id" // matches the entity parent ID +) + +const ( + // The Qt* (Query Type) constants are the names used with Query requests to retrieve the corresponding entities + QtVappTemplate = "vAppTemplate" // vApp template + QtAdminVappTemplate = "adminVAppTemplate" // vApp template as admin + QtEdgeGateway = "edgeGateway" // edge gateway + QtOrgVdcNetwork = "orgVdcNetwork" // Org VDC network + QtCatalog = "catalog" // catalog + QtAdminCatalog = "adminCatalog" // catalog as admin + QtCatalogItem = "catalogItem" // catalog item + QtAdminCatalogItem = "adminCatalogItem" // catalog item as admin + QtAdminMedia = "adminMedia" // media item as admin + QtMedia = "media" // media item + QtVm = "vm" // Virtual machine + QtAdminVm = "adminVM" // Virtual machine as admin + QtVapp = "vApp" // vApp + QtAdminVapp = 
"adminVApp" // vApp as admin + QtOrgVdc = "orgVdc" // Org VDC + QtAdminOrgVdc = "adminOrgVdc" // Org VDC as admin +) + +// AdminQueryTypes returns the corresponding "admin" query type for each regular type +var AdminQueryTypes = map[string]string{ + QtEdgeGateway: QtEdgeGateway, // EdgeGateway query type is the same for admin and regular users + QtOrgVdcNetwork: QtOrgVdcNetwork, // Org VDC Network query type is the same for admin and regular users + QtVappTemplate: QtAdminVappTemplate, + QtCatalog: QtAdminCatalog, + QtCatalogItem: QtAdminCatalogItem, + QtMedia: QtAdminMedia, + QtVm: QtAdminVm, + QtVapp: QtAdminVapp, + QtOrgVdc: QtAdminOrgVdc, +} + +const ( + // Affinity and anti affinity definitions + PolarityAffinity = "Affinity" + PolarityAntiAffinity = "Anti-Affinity" +) + +// VmQueryFilter defines how we search VMs +type VmQueryFilter int + +const ( + // VmQueryFilterAll defines a no-filter search, i.e. will return all elements + VmQueryFilterAll VmQueryFilter = iota + + // VmQueryFilterOnlyDeployed defines a filter for deployed VMs + VmQueryFilterOnlyDeployed + + // VmQueryFilterOnlyTemplates defines a filter for VMs inside a template + VmQueryFilterOnlyTemplates +) + +// String converts a VmQueryFilter into the corresponding filter needed by the query to get the wanted result +func (qf VmQueryFilter) String() string { + // Makes sure that we handle out-of-range values + if qf < VmQueryFilterAll || qf > VmQueryFilterOnlyTemplates { + return "" + } + return [...]string{ + "", // No filter: will not remove any items + "isVAppTemplate==false", // Will find only the deployed VMs + "isVAppTemplate==true", // Will find only those VM that are inside a template + }[qf] +} + +// LDAP modes for Organization +const ( + LdapModeNone = "NONE" + LdapModeSystem = "SYSTEM" + LdapModeCustom = "CUSTOM" +) + +// Access control modes +const ( + ControlAccessReadOnly = "ReadOnly" + ControlAccessReadWrite = "Change" + ControlAccessFullControl = "FullControl" +) + +// BodyType allows to define API body types where applicable +type BodyType int + +const ( + // BodyTypeXML + BodyTypeXML BodyType = iota + + // BodyTypeJSON + BodyTypeJSON +) + +const ( + // FiqlQueryTimestampFormat is the format accepted by Cloud API time comparison operator in FIQL query filters + FiqlQueryTimestampFormat = "2006-01-02T15:04:05.000Z" +) + +// These constants allow constructing OpenAPI endpoint paths and avoid strings in code for easy replacement in the +// future. 
+const ( + OpenApiPathVersion1_0_0 = "1.0.0/" + OpenApiEndpointRoles = "roles/" + OpenApiEndpointGlobalRoles = "globalRoles/" + OpenApiEndpointRights = "rights/" + OpenApiEndpointRightsCategories = "rightsCategories/" + OpenApiEndpointRightsBundles = "rightsBundles/" + OpenApiEndpointAuditTrail = "auditTrail/" + OpenApiEndpointImportableTier0Routers = "nsxTResources/importableTier0Routers" + OpenApiEndpointImportableSwitches = "/network/orgvdcnetworks/importableswitches" + OpenApiEndpointEdgeClusters = "nsxTResources/edgeClusters" + OpenApiEndpointExternalNetworks = "externalNetworks/" + OpenApiEndpointVdcComputePolicies = "vdcComputePolicies/" + OpenApiEndpointVdcAssignedComputePolicies = "vdcs/%s/computePolicies" + OpenApiEndpointVdcCapabilities = "vdcs/%s/capabilities" + OpenApiEndpointEdgeGateways = "edgeGateways/" + OpenApiEndpointNsxtFirewallRules = "edgeGateways/%s/firewall/rules" + OpenApiEndpointFirewallGroups = "firewallGroups/" + OpenApiEndpointOrgVdcNetworks = "orgVdcNetworks/" + OpenApiEndpointOrgVdcNetworksDhcp = "orgVdcNetworks/%s/dhcp" + OpenApiEndpointNsxtNatRules = "edgeGateways/%s/nat/rules/" + OpenApiEndpointAppPortProfiles = "applicationPortProfiles/" + OpenApiEndpointIpSecVpnTunnel = "edgeGateways/%s/ipsec/tunnels/" + OpenApiEndpointIpSecVpnTunnelConnectionProperties = "edgeGateways/%s/ipsec/tunnels/%s/connectionProperties" + OpenApiEndpointIpSecVpnTunnelStatus = "edgeGateways/%s/ipsec/tunnels/%s/status" + OpenApiEndpointSSLCertificateLibrary = "ssl/certificateLibrary/" + OpenApiEndpointSSLCertificateLibraryOld = "ssl/cetificateLibrary/" + OpenApiEndpointSessionCurrent = "sessions/current" + OpenApiEndpointVdcGroups = "vdcGroups/" + OpenApiEndpointVdcGroupsCandidateVdcs = "vdcGroups/networkingCandidateVdcs" + OpenApiEndpointVdcGroupsDfwPolicies = "vdcGroups/%s/dfwPolicies" + OpenApiEndpointVdcGroupsDfwDefaultPolicies = "vdcGroups/%s/dfwPolicies/default" + + // NSX-T ALB related endpoints + + OpenApiEndpointAlbController = "loadBalancer/controllers/" + + // OpenApiEndpointAlbImportableClouds endpoint requires a filter _context==urn:vcloud:loadBalancerController:aa23ef66-ba32-48b2-892f-7acdffe4587e + OpenApiEndpointAlbImportableClouds = "nsxAlbResources/importableClouds/" + OpenApiEndpointAlbImportableServiceEngineGroups = "nsxAlbResources/importableServiceEngineGroups" + OpenApiEndpointAlbCloud = "loadBalancer/clouds/" + OpenApiEndpointAlbServiceEngineGroups = "loadBalancer/serviceEngineGroups/" + OpenApiEndpointAlbPools = "loadBalancer/pools/" + // OpenApiEndpointAlbPoolSummaries returns a limited subset of data provided by OpenApiEndpointAlbPools + // however only the summary endpoint can list all available pools for an edge gateway + OpenApiEndpointAlbPoolSummaries = "edgeGateways/%s/loadBalancer/poolSummaries" // %s contains edge gateway + OpenApiEndpointAlbVirtualServices = "loadBalancer/virtualServices/" + OpenApiEndpointAlbVirtualServiceSummaries = "edgeGateways/%s/loadBalancer/virtualServiceSummaries" // %s contains edge gateway + OpenApiEndpointAlbServiceEngineGroupAssignments = "loadBalancer/serviceEngineGroups/assignments/" + OpenApiEndpointAlbEdgeGateway = "edgeGateways/%s/loadBalancer" +) + +// Header keys to run operations in tenant context +const ( + // HeaderTenantContext requires the Org ID of the tenant + HeaderTenantContext = "X-VMWARE-VCLOUD-TENANT-CONTEXT" + // HeaderAuthContext requires the Org name of the tenant + HeaderAuthContext = "X-VMWARE-VCLOUD-AUTH-CONTEXT" +) + +const ( + // ExternalNetworkBackingTypeNsxtTier0Router defines backing type 
of NSX-T Tier-0 router + ExternalNetworkBackingTypeNsxtTier0Router = "NSXT_TIER0" + // ExternalNetworkBackingTypeNsxtVrfTier0Router defines backing type of NSX-T Tier-0 VRF router + ExternalNetworkBackingTypeNsxtVrfTier0Router = "NSXT_VRF_TIER0" + // ExternalNetworkBackingTypeNsxtSegment defines backing type of NSX-T Segment (supported in VCD 10.3+) + ExternalNetworkBackingTypeNsxtSegment = "IMPORTED_T_LOGICAL_SWITCH" + // ExternalNetworkBackingTypeNetwork defines vSwitch portgroup + ExternalNetworkBackingTypeNetwork = "NETWORK" + // ExternalNetworkBackingDvPortgroup refers distributed switch portgroup + ExternalNetworkBackingDvPortgroup = "DV_PORTGROUP" +) + +const ( + // OrgVdcNetworkTypeRouted can be used to create NSX-T or NSX-V routed Org Vdc network + OrgVdcNetworkTypeRouted = "NAT_ROUTED" + // OrgVdcNetworkTypeIsolated can be used to create NSX-T or NSX-V isolated Org Vdc network + OrgVdcNetworkTypeIsolated = "ISOLATED" + // OrgVdcNetworkTypeOpaque type is used to create NSX-T imported Org Vdc network + OrgVdcNetworkTypeOpaque = "OPAQUE" + // OrgVdcNetworkTypeDirect can be used to create NSX-V direct Org Vdc network + OrgVdcNetworkTypeDirect = "DIRECT" +) + +const ( + // VdcCapabilityNetworkProviderNsxv is a convenience constant to match VDC capability + VdcCapabilityNetworkProviderNsxv = "NSX_V" + // VdcCapabilityNetworkProviderNsxt is a convenience constant to match VDC capability + VdcCapabilityNetworkProviderNsxt = "NSX_T" +) + +const ( + // FirewallGroupTypeSecurityGroup can be used in types.NsxtFirewallGroup for 'type' field to + // create Security Group + FirewallGroupTypeSecurityGroup = "SECURITY_GROUP" + // FirewallGroupTypeIpSet can be used in types.NsxtFirewallGroup for 'type' field to create IP + // Set + FirewallGroupTypeIpSet = "IP_SET" +) + +// These constants can be used to pick type of NSX-T NAT Rule +const ( + NsxtNatRuleTypeDnat = "DNAT" + NsxtNatRuleTypeNoDnat = "NO_DNAT" + NsxtNatRuleTypeSnat = "SNAT" + NsxtNatRuleTypeNoSnat = "NO_SNAT" + NsxtNatRuleTypeReflexive = "REFLEXIVE" // Only in VCD 10.3+ (API V36.0) +) + +// In VCD versions 10.2.2+ (API V35.2+) there is a FirewallMatch field in NAT rule with these +// options +const ( + // NsxtNatRuleFirewallMatchInternalAddress will match firewall rules based on NAT rules internal + // address (DEFAULT) + NsxtNatRuleFirewallMatchInternalAddress = "MATCH_INTERNAL_ADDRESS" + // NsxtNatRuleFirewallMatchExternalAddress will match firewall rules based on NAT rule external + // address + NsxtNatRuleFirewallMatchExternalAddress = "MATCH_EXTERNAL_ADDRESS" + // NsxtNatRuleFirewallMatchBypass will skip evaluating NAT rules in firewall + NsxtNatRuleFirewallMatchBypass = "BYPASS" +) + +const ( + // ApplicationPortProfileScopeSystem is a defined scope which allows user to only read (no write capability) system + // predefined Application Port Profiles + ApplicationPortProfileScopeSystem = "SYSTEM" + // ApplicationPortProfileScopeProvider allows user to read and set Application Port Profiles at provider level. In + // reality Network Provider (NSX-T Manager) must be specified while creating. + ApplicationPortProfileScopeProvider = "PROVIDER" + // ApplicationPortProfileScopeTenant allows user to read and set Application Port Profiles at Org VDC level. 
+ ApplicationPortProfileScopeTenant = "TENANT" +) + +const ( + // VcloudUndefinedKey is the bundles key automatically added to new role related objects + VcloudUndefinedKey = "com.vmware.vcloud.undefined.key" +) + +const ( + // NsxtAlbCloudBackingTypeNsxtAlb is a backing type for NSX-T ALB used in types.NsxtAlbCloudBacking + NsxtAlbCloudBackingTypeNsxtAlb = "NSXALB_NSXT" +) diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/link.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/link.go new file mode 100644 index 000000000..ec166bb2e --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/link.go @@ -0,0 +1,215 @@ +/* + * Copyright 2018 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package types + +// LinkPredicate is a predicate for finding links in a link list +type LinkPredicate func(*Link) bool + +func byTypeAndRel(tpe, rel string) LinkPredicate { + if rel == "" { + rel = RelDown + } + return func(lnk *Link) bool { + return lnk != nil && lnk.Type == tpe && lnk.Rel == rel + } +} + +func byNameTypeAndRel(nme, tpe, rel string) LinkPredicate { + tpePred := byTypeAndRel(tpe, rel) + return func(lnk *Link) bool { + return tpePred(lnk) && lnk.Name == nme + } +} + +// LinkList represents a list of links +type LinkList []*Link + +// Find the first occurrence that matches the predicate +func (l LinkList) Find(predicate LinkPredicate) *Link { + for _, lnk := range l { + if predicate(lnk) { + return lnk + } + } + return nil +} + +// ForType finds a link for a given type +func (l LinkList) ForType(tpe, rel string) *Link { + return l.Find(byTypeAndRel(tpe, rel)) +} + +// ForName finds a link for a given name and type +func (l LinkList) ForName(name, tpe, rel string) *Link { + return l.Find(byNameTypeAndRel(name, tpe, rel)) +} + +const ( + RelDown = "down" + RelAdd = "add" + RelUp = "up" + RelEdit = "edit" + RelRemove = "remove" + RelCopy = "copy" + RelMove = "move" + RelAlternate = "alternate" + RelTaskCancel = "task:cancel" + RelDeploy = "deploy" + RelUndeploy = "undeploy" + RelDiscardState = "discardState" + RelPowerOn = "power:powerOn" + RelPowerOff = "power:powerOff" + RelPowerReset = "power:reset" + RelPowerReboot = "power:reboot" + RelPowerSuspend = "power:suspend" + RelPowerShutdown = "power:shutdown" + + RelScreenThumbnail = "screen:thumbnail" + RelScreenAcquireTicket = "screen:acquireTicket" + RelScreenAcquireMksTicket = "screen:acquireMksTicket" + + RelMediaInsertMedia = "media:insertMedia" + RelMediaEjectMedia = "media:ejectMedia" + + RelDiskAttach = "disk:attach" + RelDiskDetach = "disk:detach" + + RelUploadDefault = "upload:default" + RelUploadAlternate = "upload:alternate" + + RelDownloadDefault = "download:default" + RelDownloadAlternate = "download:alternate" + RelDownloadIdentity = "download:identity" + + RelSnapshotCreate = "snapshot:create" + RelSnapshotRevertToCurrent = "snapshot:revertToCurrent" + RelSnapshotRemoveAll = "snapshot:removeAll" + + RelOVF = "ovf" + RelOVA = "ova" + RelControlAccess = "controlAccess" + RelPublish = "publish" + RelPublishExternal = "publishToExternalOrganizations" + RelSubscribeExternal = "subscribeToExternalCatalog" + RelExtension = "extension" + RelEnable = "enable" + RelDisable = "disable" + RelMerge = "merge" + RelCatalogItem = "catalogItem" + RelRecompose = "recompose" + RelRegister = "register" + RelUnregister = "unregister" + RelRepair = "repair" + RelReconnect = "reconnect" + 
RelDisconnect = "disconnect" + RelUpgrade = "upgrade" + RelAnswer = "answer" + RelAddOrgs = "addOrgs" + RelRemoveOrgs = "removeOrgs" + RelSync = "sync" + + RelVSphereWebClientURL = "vSphereWebClientUrl" + RelVimServerDvSwitches = "vimServerDvSwitches" + + RelCollaborationResume = "resume" + RelCollaborationFail = "fail" + RelEnterMaintenanceMode = "enterMaintenanceMode" + RelExitMaintenanceMode = "exitMaintenanceMode" + RelTask = "task" + RelTaskOwner = "task:owner" + RelPreviousPage = "previousPage" + RelNextPage = "nextPage" + RelFirstPage = "firstPage" + RelLastPage = "lastPage" + RelInstallVMWareTools = "installVmwareTools" + RelConsolidate = "consolidate" + RelEntity = "entity" + RelEntityResolver = "entityResolver" + RelRelocate = "relocate" + RelBlockingTasks = "blockingTasks" + RelUpdateProgress = "updateProgress" + RelSyncSyslogSettings = "syncSyslogSettings" + RelTakeOwnership = "takeOwnership" + RelUnlock = "unlock" + RelShadowVMs = "shadowVms" + RelTest = "test" + RelUpdateResourcePools = "update:resourcePools" + RelRemoveForce = "remove:force" + RelStorageClass = "storageProfile" + RelRefreshStorageClasses = "refreshStorageProfile" + RelRefreshVirtualCenter = "refreshVirtualCenter" + RelCheckCompliance = "checkCompliance" + RelForceFullCustomization = "customizeAtNextPowerOn" + RelReloadFromVC = "reloadFromVc" + RelMetricsDayView = "interval:day" + RelMetricsWeekView = "interval:week" + RelMetricsMonthView = "interval:month" + RelMetricsYearView = "interval:year" + RelMetricsPreviousRange = "range:previous" + RelMetricsNextRange = "range:next" + RelMetricsLatestRange = "range:latest" + RelRights = "rights" + RelMigratVMs = "migrateVms" + RelResourcePoolVMList = "resourcePoolVmList" + RelCreateEvent = "event:create" + RelCreateTask = "task:create" + RelUploadBundle = "bundle:upload" + RelCleanupBundles = "bundles:cleanup" + RelAuthorizationCheck = "authorization:check" + RelCleanupRights = "rights:cleanup" + + RelEdgeGatewayRedeploy = "edgeGateway:redeploy" + RelEdgeGatewayReapplyServices = "edgeGateway:reapplyServices" + RelEdgeGatewayConfigureServices = "edgeGateway:configureServices" + RelEdgeGatewayConfigureSyslog = "edgeGateway:configureSyslogServerSettings" + RelEdgeGatewaySyncSyslogSettings = "edgeGateway:syncSyslogSettings" + RelEdgeGatewayUpgrade = "edgeGateway:upgrade" + RelEdgeGatewayUpgradeNetworking = "edgeGateway:convertToAdvancedNetworking" + RelVDCManageFirewall = "manageFirewall" + + RelCertificateUpdate = "certificate:update" + RelCertificateReset = "certificate:reset" + RelTruststoreUpdate = "truststore:update" + RelTruststoreReset = "truststore:reset" + RelKeyStoreUpdate = "keystore:update" + RelKeystoreReset = "keystore:reset" + RelKeytabUpdate = "keytab:update" + RelKeytabReset = "keytab:reset" + + RelServiceLinks = "down:serviceLinks" + RelAPIFilters = "down:apiFilters" + RelResourceClasses = "down:resourceClasses" + RelResourceClassActions = "down:resourceClassActions" + RelServices = "down:services" + RelACLRules = "down:aclRules" + RelFileDescriptors = "down:fileDescriptors" + RelAPIDefinitions = "down:apiDefinitions" + RelServiceResources = "down:serviceResources" + RelExtensibility = "down:extensibility" + RelAPIServiceQuery = "down:service" + RelAPIDefinitionsQuery = "down:apidefinitions" + RelAPIFilesQuery = "down:files" + RelServiceOfferings = "down:serviceOfferings" + RelServiceOfferingInstances = "down:serviceOfferingInstances" + RelHybrid = "down:hybrid" + + RelServiceRefresh = "service:refresh" + RelServiceAssociate = "service:associate" + 
RelServiceDisassociate = "service:disassociate" + + RelReconfigureVM = "reconfigureVM" + + RelOrgVDCGateways = "edgeGateways" + RelOrgVDCNetworks = "orgVdcNetworks" + + RelHybridAcquireControlTicket = "hybrid:acquireControlTicket" + RelHybridAcquireTicket = "hybrid:acquireTicket" + RelHybridRefreshTunnel = "hybrid:refreshTunnel" + + RelMetrics = "metrics" + + RelFederationRegenerateCertificate = "federation:regenerateFederationCertificate" + RelTemplateInstantiate = "instantiate" +) diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/nsxt_types.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/nsxt_types.go new file mode 100644 index 000000000..8b705fa93 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/nsxt_types.go @@ -0,0 +1,1108 @@ +package types + +// OpenAPIEdgeGateway structure supports marshalling both - NSX-V and NSX-T edge gateways as returned by OpenAPI +// endpoint (cloudapi/1.0.0edgeGateways/), but the endpoint only allows users to create NSX-T edge gateways. +type OpenAPIEdgeGateway struct { + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + // Name of edge gateway + Name string `json:"name"` + // Description of edge gateway + Description string `json:"description"` + // OwnerRef defines Org VDC or VDC Group that this network belongs to. If the ownerRef is set to a VDC Group, this + // network will be available across all the VDCs in the vDC Group. If the VDC Group is backed by a NSX-V network + // provider, the Org VDC network is automatically connected to the distributed router associated with the VDC Group + // and the "connection" field does not need to be set. For API version 35.0 and above, this field should be set for + // network creation. + OwnerRef *OpenApiReference `json:"ownerRef,omitempty"` + // OrgVdc holds the organization vDC or vDC Group that this edge gateway belongs to. If the ownerRef is set to a VDC + // Group, this gateway will be available across all the participating Organization vDCs in the VDC Group. + OrgVdc *OpenApiReference `json:"orgVdc,omitempty"` + // Org holds the organization to which the gateway belongs. + Org *OpenApiReference `json:"orgRef,omitempty"` + // EdgeGatewayUplink defines uplink connections for the edge gateway. + EdgeGatewayUplinks []EdgeGatewayUplinks `json:"edgeGatewayUplinks"` + // DistributedRoutingEnabled is a flag indicating whether distributed routing is enabled or not. The default is false. + DistributedRoutingEnabled *bool `json:"distributedRoutingEnabled,omitempty"` + // EdgeClusterConfig holds Edge Cluster Configuration for the Edge Gateway. Can be specified if a gateway needs to be + // placed on a specific set of Edge Clusters. For NSX-T Edges, user should specify the ID of the NSX-T edge cluster as + // the value of primaryEdgeCluster's backingId. The gateway defaults to the Edge Cluster of the connected External + // Network's backing Tier-0 router, if nothing is specified. + // + // Note. The value of secondaryEdgeCluster will be set to NULL for NSX-T edge gateways. For NSX-V Edges, this is + // read-only and the legacy API must be used for edge specific placement. + EdgeClusterConfig *OpenAPIEdgeGatewayEdgeClusterConfig `json:"edgeClusterConfig,omitempty"` + // OrgVdcNetworkCount holds the number of Org VDC networks connected to the gateway. 
+ OrgVdcNetworkCount *int `json:"orgVdcNetworkCount,omitempty"` + // GatewayBacking must contain backing details of the edge gateway only if importing an NSX-T router. + GatewayBacking *OpenAPIEdgeGatewayBacking `json:"gatewayBacking,omitempty"` + + // ServiceNetworkDefinition holds network definition in CIDR form that DNS and DHCP service on an NSX-T edge will run + // on. The subnet prefix length must be 27. This property applies to creating or importing an NSX-T Edge. This is not + // supported for VMC. If nothing is set, the default is 192.168.255.225/27. The DHCP listener IP network is on + // 192.168.255.225/30. The DNS listener IP network is on 192.168.255.228/32. This field cannot be updated. + ServiceNetworkDefinition string `json:"serviceNetworkDefinition,omitempty"` +} + +// EdgeGatewayUplink defines uplink connections for the edge gateway. +type EdgeGatewayUplinks struct { + // UplinkID contains ID of external network + UplinkID string `json:"uplinkId,omitempty"` + // UplinkID contains Name of external network + UplinkName string `json:"uplinkName,omitempty"` + // Subnets contain subnets to be used on edge gateway + Subnets OpenAPIEdgeGatewaySubnets `json:"subnets,omitempty"` + Connected bool `json:"connected,omitempty"` + // QuickAddAllocatedIPCount allows users to allocate additional IPs during update + QuickAddAllocatedIPCount int `json:"quickAddAllocatedIpCount,omitempty"` + // Dedicated defines if the external network is dedicated. Dedicating the External Network will enable Route + // Advertisement for this Edge Gateway + Dedicated bool `json:"dedicated,omitempty"` +} + +// OpenApiIPRanges is a type alias to reuse the same definitions with appropriate names +type OpenApiIPRanges = ExternalNetworkV2IPRanges + +// OpenApiIPRangeValues is a type alias to reuse the same definitions with appropriate names +type OpenApiIPRangeValues = ExternalNetworkV2IPRange + +// OpenAPIEdgeGatewaySubnets lists slice of OpenAPIEdgeGatewaySubnetValue values +type OpenAPIEdgeGatewaySubnets struct { + Values []OpenAPIEdgeGatewaySubnetValue `json:"values"` +} + +// OpenAPIEdgeGatewaySubnetValue holds one subnet definition in external network +type OpenAPIEdgeGatewaySubnetValue struct { + // Gateway specified subnet gateway + Gateway string `json:"gateway"` + // PrefixLength from CIDR format (e.g. 
24 from 192.168.1.1/24) + PrefixLength int `json:"prefixLength"` + // DNSSuffix can only be used for reading NSX-V edge gateway + DNSSuffix string `json:"dnsSuffix,omitempty"` + // DNSServer1 can only be used for reading NSX-V edge gateway + DNSServer1 string `json:"dnsServer1,omitempty"` + // DNSServer2 can only be used for reading NSX-V edge gateway + DNSServer2 string `json:"dnsServer2,omitempty"` + // IPRanges contain IP allocations + IPRanges *OpenApiIPRanges `json:"ipRanges,omitempty"` + // Enabled toggles if the subnet is enabled + Enabled bool `json:"enabled"` + TotalIPCount int `json:"totalIpCount,omitempty"` + UsedIPCount int `json:"usedIpCount,omitempty"` + PrimaryIP string `json:"primaryIp,omitempty"` + AutoAllocateIPRanges bool `json:"autoAllocateIpRanges,omitempty"` +} + +// OpenAPIEdgeGatewayBacking specifies edge gateway backing details +type OpenAPIEdgeGatewayBacking struct { + BackingID string `json:"backingId,omitempty"` + GatewayType string `json:"gatewayType,omitempty"` + NetworkProvider NetworkProvider `json:"networkProvider"` +} + +// OpenAPIEdgeGatewayEdgeCluster allows users to specify edge cluster reference +type OpenAPIEdgeGatewayEdgeCluster struct { + EdgeClusterRef OpenApiReference `json:"edgeClusterRef"` + BackingID string `json:"backingId"` +} + +type OpenAPIEdgeGatewayEdgeClusterConfig struct { + PrimaryEdgeCluster OpenAPIEdgeGatewayEdgeCluster `json:"primaryEdgeCluster,omitempty"` + SecondaryEdgeCluster OpenAPIEdgeGatewayEdgeCluster `json:"secondaryEdgeCluster,omitempty"` +} + +// OpenApiOrgVdcNetwork allows users to manage Org Vdc networks +type OpenApiOrgVdcNetwork struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Description string `json:"description"` + Status string `json:"status,omitempty"` + // OwnerRef defines Org VDC or VDC Group that this network belongs to. If the ownerRef is set to a VDC Group, this + // network will be available across all the VDCs in the vDC Group. If the VDC Group is backed by a NSX-V network + // provider, the Org VDC network is automatically connected to the distributed router associated with the VDC Group + // and the "connection" field does not need to be set. For API version 35.0 and above, this field should be set for + // network creation. + // + // Note. In lower API versions (i.e. 32.0) this field is not recognized and OrgVdc should be used instead + OwnerRef *OpenApiReference `json:"ownerRef,omitempty"` + + OrgVdc *OpenApiReference `json:"orgVdc,omitempty"` + + // NetworkType describes type of Org Vdc network. ('NAT_ROUTED', 'ISOLATED') + NetworkType string `json:"networkType"` + + // Connection specifies the edge gateway this network is connected to. + // + // Note. When NetworkType == ISOLATED, there is no uplink connection. + Connection *Connection `json:"connection,omitempty"` + + // backingNetworkId contains the NSX ID of the backing network. + BackingNetworkId string `json:"backingNetworkId,omitempty"` + // backingNetworkType contains object type of the backing network. 
('VIRTUAL_WIRE' for NSX-V, 'NSXT_FLEXIBLE_SEGMENT' + // for NSX-T) + BackingNetworkType string `json:"backingNetworkType,omitempty"` + + // ParentNetwork should have external network ID specified when creating NSX-V direct network + ParentNetwork *OpenApiReference `json:"parentNetworkId"` + + // GuestVlanTaggingAllowed specifies whether guest VLAN tagging is allowed + GuestVlanTaggingAllowed *bool `json:"guestVlanTaggingAllowed"` + + // Subnets contains list of subnets defined on + Subnets OrgVdcNetworkSubnets `json:"subnets"` + + // SecurityGroups defines a list of firewall groups of type SECURITY_GROUP that are assigned to the Org VDC Network. + // These groups can then be used in firewall rules to protect the Org VDC Network and allow/deny traffic. + SecurityGroups *OpenApiReferences `json:"securityGroups,omitempty"` + + // RouteAdvertised reports if this network is advertised so that it can be routed out to the external networks. This + // applies only to network backed by NSX-T. Value will be unset if route advertisement is not applicable. + RouteAdvertised *bool `json:"routeAdvertised,omitempty"` + + // TotalIpCount is a read only attribute reporting total number of IPs available in network + TotalIpCount *int `json:"totalIpCount"` + + // UsedIpCount is a read only attribute reporting number of used IPs in network + UsedIpCount *int `json:"usedIpCount"` + + // Shared shares network with other VDCs in the organization + Shared *bool `json:"shared,omitempty"` +} + +// OrgVdcNetworkSubnetIPRanges is a type alias to reuse the same definitions with appropriate names +type OrgVdcNetworkSubnetIPRanges = ExternalNetworkV2IPRanges + +// OrgVdcNetworkSubnetIPRangeValues is a type alias to reuse the same definitions with appropriate names +type OrgVdcNetworkSubnetIPRangeValues = ExternalNetworkV2IPRange + +//OrgVdcNetworkSubnets +type OrgVdcNetworkSubnets struct { + Values []OrgVdcNetworkSubnetValues `json:"values"` +} + +type OrgVdcNetworkSubnetValues struct { + Gateway string `json:"gateway"` + PrefixLength int `json:"prefixLength"` + DNSServer1 string `json:"dnsServer1"` + DNSServer2 string `json:"dnsServer2"` + DNSSuffix string `json:"dnsSuffix"` + IPRanges OrgVdcNetworkSubnetIPRanges `json:"ipRanges"` +} + +// Connection specifies the edge gateway this network is connected to +type Connection struct { + RouterRef OpenApiReference `json:"routerRef"` + ConnectionType string `json:"connectionType,omitempty"` +} + +// NsxtImportableSwitch is a type alias with better name for holding NSX-T Segments (Logical Switches) which can be used +// to back NSX-T imported Org VDC network +type NsxtImportableSwitch = OpenApiReference + +// OpenApiOrgVdcNetworkDhcp allows users to manage DHCP configuration for Org VDC networks by using OpenAPI endpoint +type OpenApiOrgVdcNetworkDhcp struct { + Enabled *bool `json:"enabled,omitempty"` + LeaseTime *int `json:"leaseTime,omitempty"` + DhcpPools []OpenApiOrgVdcNetworkDhcpPools `json:"dhcpPools,omitempty"` + // Mode describes how the DHCP service is configured for this network. Once a DHCP service has been created, the mode + // attribute cannot be changed. The mode field will default to 'EDGE' if it is not provided. This field only applies + // to networks backed by an NSX-T network provider. + // + // The supported values are EDGE (default) and NETWORK. + // * If EDGE is specified, the DHCP service of the edge is used to obtain DHCP IPs. + // * If NETWORK is specified, a DHCP server is created for use by this network. 
(To use NETWORK + // + // In order to use DHCP for IPV6, NETWORK mode must be used. Routed networks which are using NETWORK DHCP services can + // be disconnected from the edge gateway and still retain their DHCP configuration, however network using EDGE DHCP + // cannot be disconnected from the gateway until DHCP has been disabled. + Mode string `json:"mode,omitempty"` + // IPAddress is only applicable when mode=NETWORK. This will specify IP address of DHCP server in network. + IPAddress string `json:"ipAddress,omitempty"` +} + +// OpenApiOrgVdcNetworkDhcpIpRange is a type alias to fit naming +type OpenApiOrgVdcNetworkDhcpIpRange = ExternalNetworkV2IPRange + +type OpenApiOrgVdcNetworkDhcpPools struct { + // Enabled defines if the DHCP pool is enabled or not + Enabled *bool `json:"enabled,omitempty"` + // IPRange holds IP ranges + IPRange OpenApiOrgVdcNetworkDhcpIpRange `json:"ipRange"` + // MaxLeaseTime is the maximum lease time that can be accepted on clients request + // This applies for NSX-V Isolated network + MaxLeaseTime *int `json:"maxLeaseTime,omitempty"` + // DefaultLeaseTime is the lease time that clients get if they do not specify particular lease time + // This applies for NSX-V Isolated network + DefaultLeaseTime *int `json:"defaultLeaseTime,omitempty"` +} + +// NsxtFirewallGroup allows users to set either SECURITY_GROUP or IP_SET which is defined by Type field. +// SECURITY_GROUP (constant types.FirewallGroupTypeSecurityGroup) is a dynamic structure which +// allows users to add Routed Org VDC networks +// +// IP_SET (constant FirewallGroupTypeIpSet) allows users to enter static IPs and later on firewall rules +// can be created both of these objects +// +// When the type is SECURITY_GROUP 'Members' field is used to specify Org VDC networks +// When the type is IP_SET 'IpAddresses' field is used to specify IP addresses or ranges +// field is used +type NsxtFirewallGroup struct { + // ID contains Firewall Group ID (URN format) + // e.g. urn:vcloud:firewallGroup:d7f4e0b4-b83f-4a07-9f22-d242c9c0987a + ID string `json:"id"` + // Name of Firewall Group. Name are unique per 'Type'. There cannot be two SECURITY_GROUP or two + // IP_SET objects with the same name, but there can be one object of Type SECURITY_GROUP and one + // of Type IP_SET named the same. + Name string `json:"name"` + Description string `json:"description"` + // IP Addresses included in the group. This is only applicable for IP_SET Firewall Groups. This + // can support IPv4 and IPv6 addresses in single, range, and CIDR formats. + // E.g [ + // "12.12.12.1", + // "10.10.10.0/24", + // "11.11.11.1-11.11.11.2", + // "2001:db8::/48", + // "2001:db6:0:0:0:0:0:0-2001:db6:0:ffff:ffff:ffff:ffff:ffff", + // ], + IpAddresses []string `json:"ipAddresses,omitempty"` + + // Members define list of Org VDC networks belonging to this Firewall Group (only for Security + // groups ) + Members []OpenApiReference `json:"members,omitempty"` + + // OwnerRef replaces EdgeGatewayRef in API V35.0+ and can accept both - NSX-T Edge Gateway or a + // VDC group ID + // Sample VDC Group URN - urn:vcloud:vdcGroup:89a53000-ef41-474d-80dc-82431ff8a020 + // Sample Edge Gateway URN - urn:vcloud:gateway:71df3e4b-6da9-404d-8e44-0865751c1c38 + // + // Note. Using API V34.0 Firewall Groups can be created for VDC groups, but on a GET operation + // there will be no VDC group ID returned. 
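+ //
+ // An illustrative IP_SET group scoped to an NSX-T Edge Gateway (the name, addresses and
+ // owner ID below are placeholder values, not taken from a real environment):
+ //
+ //	ipSet := NsxtFirewallGroup{
+ //		Name:        "web-servers",
+ //		Type:        "IP_SET",
+ //		IpAddresses: []string{"10.10.10.0/24", "11.11.11.1-11.11.11.2"},
+ //		OwnerRef:    &OpenApiReference{ID: "urn:vcloud:gateway:71df3e4b-6da9-404d-8e44-0865751c1c38"},
+ //	}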
+ OwnerRef *OpenApiReference `json:"ownerRef,omitempty"`
+
+ // EdgeGatewayRef is a deprecated field (use OwnerRef) for setting value, but during read the
+ // value is only populated in this field (not OwnerRef)
+ EdgeGatewayRef *OpenApiReference `json:"edgeGatewayRef,omitempty"`
+
+ // Type is either SECURITY_GROUP or IP_SET
+ Type string `json:"type"`
+}
+
+// NsxtFirewallGroupMemberVms is a structure to read NsxtFirewallGroup associated VMs when its type
+// is SECURITY_GROUP
+type NsxtFirewallGroupMemberVms struct {
+ VmRef *OpenApiReference `json:"vmRef"`
+ // VappRef will be empty if it is a standalone VM (although hidden vApp exists)
+ VappRef *OpenApiReference `json:"vappRef"`
+ VdcRef *OpenApiReference `json:"vdcRef"`
+ OrgRef *OpenApiReference `json:"orgRef"`
+}
+
+// NsxtFirewallRule defines a single NSX-T Firewall Rule
+type NsxtFirewallRule struct {
+ // ID contains UUID (e.g. d0bf5d51-f83a-489a-9323-1661024874b8)
+ ID string `json:"id,omitempty"`
+ // Name - API does not enforce uniqueness
+ Name string `json:"name"`
+ // Action 'ALLOW', 'DROP'
+ Action string `json:"action"`
+ // Enabled allows enabling or disabling the rule
+ Enabled bool `json:"enabled"`
+ // SourceFirewallGroups contains a list of references to Firewall Groups. Empty list means 'Any'
+ SourceFirewallGroups []OpenApiReference `json:"sourceFirewallGroups,omitempty"`
+ // DestinationFirewallGroups contains a list of references to Firewall Groups. Empty list means 'Any'
+ DestinationFirewallGroups []OpenApiReference `json:"destinationFirewallGroups,omitempty"`
+ // ApplicationPortProfiles contains a list of references to Application Port Profiles. Empty list means 'Any'
+ ApplicationPortProfiles []OpenApiReference `json:"applicationPortProfiles,omitempty"`
+ // IpProtocol 'IPV4', 'IPV6', 'IPV4_IPV6'
+ IpProtocol string `json:"ipProtocol"`
+ Logging bool `json:"logging"`
+ // Direction 'IN_OUT', 'OUT', 'IN'
+ Direction string `json:"direction"`
+ // Version of firewall rule. Must not be set when creating.
+ Version *struct {
+ // Version is incremented after each update
+ Version *int `json:"version,omitempty"`
+ } `json:"version,omitempty"`
+}
+
+// NsxtFirewallRuleContainer wraps NsxtFirewallRule for user-defined, default and system Firewall Rules suitable for
+// API. Only UserDefinedRules are writeable. Others are read-only.
+type NsxtFirewallRuleContainer struct {
+ // SystemRules contain ordered list of system defined edge firewall rules. System rules are applied before user
+ // defined rules in the order in which they are returned.
+ SystemRules []*NsxtFirewallRule `json:"systemRules"`
+ // DefaultRules contain ordered list of default edge firewall rules. Default rules are applied after the user defined
+ // rules in the order in which they are returned.
+ DefaultRules []*NsxtFirewallRule `json:"defaultRules"`
+ // UserDefinedRules contain ordered list of user defined edge firewall rules. Users are allowed to add/modify/delete
+ // rules only in this list.
+ UserDefinedRules []*NsxtFirewallRule `json:"userDefinedRules"`
+}
+
+// NsxtAppPortProfile allows users to set custom application port definitions so that these can later be used
+// in NSX-T Firewall rules in combination with IP Sets and Security Groups.
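+//
+// A minimal illustrative TENANT-scoped profile (OrgRef and ContextEntityId would also need to be
+// set to real references; the names and ports shown here are placeholder values):
+//
+//	profile := NsxtAppPortProfile{
+//		Name:  "custom-backend-ports",
+//		Scope: "TENANT",
+//		ApplicationPorts: []NsxtAppPortProfilePort{
+//			{Protocol: "TCP", DestinationPorts: []string{"8443", "9000-9010"}},
+//		},
+//	}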
+type NsxtAppPortProfile struct { + ID string `json:"id,omitempty"` + // Name must be unique per Scope + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + // ApplicationPorts contains one or more protocol and port definitions + ApplicationPorts []NsxtAppPortProfilePort `json:"applicationPorts,omitempty"` + // OrgRef must contain at least Org ID when SCOPE==TENANT + OrgRef *OpenApiReference `json:"orgRef,omitempty"` + // ContextEntityId must contain: + // * NSX-T Manager URN (when scope==PROVIDER) + // * VDC or VDC Group ID (when scope==TENANT) + ContextEntityId string `json:"contextEntityId,omitempty"` + // Scope can be one of the following: + // * SYSTEM - Read-only (The ones that are provided by SYSTEM). Constant `types.ApplicationPortProfileScopeSystem` + // * PROVIDER - Created by Provider on a particular network provider (NSX-T manager). Constant `types.ApplicationPortProfileScopeProvider` + // * TENANT (Created by Tenant at Org VDC level). Constant `types.ApplicationPortProfileScopeTenant` + // + // When scope==PROVIDER: + // OrgRef is not required + // ContextEntityId must have NSX-T Managers URN + // When scope==TENANT + // OrgRef ID must be specified + // ContextEntityId must be set to VDC or VDC group URN + Scope string `json:"scope,omitempty"` +} + +// NsxtAppPortProfilePort allows user to set protocol and one or more ports +type NsxtAppPortProfilePort struct { + // Protocol can be one of the following: + // * "ICMPv4" + // * "ICMPv6" + // * "TCP" + // * "UDP" + Protocol string `json:"protocol"` + // DestinationPorts is optional, but can define list of ports ("1000", "1500") or port ranges ("1200-1400") + DestinationPorts []string `json:"destinationPorts,omitempty"` +} + +// NsxtNatRule describes a single NAT rule of 4 diferent RuleTypes - DNAT`, `NO_DNAT`, `SNAT`, `NO_SNAT`. +// +// A SNAT or a DNAT rule on an Edge Gateway in the VMware Cloud Director environment, you always configure the rule +// from the perspective of your organization VDC. +// DNAT and NO_DNAT - outside traffic going inside +// SNAT and NO_SNAT - inside traffic going outside +// More docs in https://docs.vmware.com/en/VMware-Cloud-Director/10.2/VMware-Cloud-Director-Tenant-Portal-Guide/GUID-9E43E3DC-C028-47B3-B7CA-59F0ED40E0A6.html +type NsxtNatRule struct { + ID string `json:"id,omitempty"` + // Name holds a meaningful name for the rule. (API does not enforce uniqueness) + Name string `json:"name"` + // Description holds optional description for the rule + Description string `json:"description"` + // Enabled defines if the rule is active + Enabled bool `json:"enabled"` + + // RuleType - one of the following: `DNAT`, `NO_DNAT`, `SNAT`, `NO_SNAT` + // * An SNAT rule translates an internal IP to an external IP and is used for outbound traffic + // * A NO SNAT rule prevents the translation of the internal IP address of packets sent from an organization VDC out + // to an external network or to another organization VDC network. + // * A DNAT rule translates the external IP to an internal IP and is used for inbound traffic. + // * A NO DNAT rule prevents the translation of the external IP address of packets received by an organization VDC + // from an external network or from another organization VDC network. 
+ // Deprecated in API V36.0 + RuleType string `json:"ruleType,omitempty"` + // Type replaces RuleType in V36.0 and adds a new Rule - REFLEXIVE + Type string `json:"type,omitempty"` + + // ExternalAddresses + // * SNAT - enter the public IP address of the edge gateway for which you are configuring the SNAT rule. + // * NO_SNAT - leave empty (but field cannot be skipped at all, therefore it does not have 'omitempty' tag) + // + // * DNAT - public IP address of the edge gateway for which you are configuring the DNAT rule. The IP + // addresses that you enter must belong to the suballocated IP range of the edge gateway. + // * NO_DNAT - leave empty + ExternalAddresses string `json:"externalAddresses"` + + // InternalAddresses + // * SNAT - the IP address or a range of IP addresses of the virtual machines for which you are configuring SNAT, so + // that they can send traffic to the external network. + // + // * DNAT - enter the IP address or a range of IP addresses of the virtual machines for which you are configuring + // DNAT, so that they can receive traffic from the external network. + // * NO_DNAT - leave empty + InternalAddresses string `json:"internalAddresses"` + ApplicationPortProfile *OpenApiReference `json:"applicationPortProfile,omitempty"` + + // InternalPort specifies port number or port range for incoming network traffic. If Any Traffic is selected for the + // Application Port Profile, the default internal port is "ANY". + // Deprecated since API V35.0 and is replaced by DnatExternalPort + InternalPort string `json:"internalPort,omitempty"` + + // DnatExternalPort can set a port into which the DNAT rule is translating for the packets inbound to the virtual + // machines. + DnatExternalPort string `json:"dnatExternalPort,omitempty"` + + // SnatDestinationAddresses applies only for RuleTypes `SNAT`, `NO_SNAT` + // If you want the rule to apply only for traffic to a specific domain, enter an IP address for this domain or an IP + // address range in CIDR format. If you leave this text box blank, the SNAT rule applies to all destinations outside + // of the local subnet. + SnatDestinationAddresses string `json:"snatDestinationAddresses,omitempty"` + + // Logging enabled or disabled logging of that rule + Logging bool `json:"logging"` + + // Below two fields are only supported in VCD 10.2.2+ (API v35.2) + + // FirewallMatch determines how the firewall matches the address during NATing if firewall stage is not skipped. + // * MATCH_INTERNAL_ADDRESS indicates the firewall will be applied to internal address of a NAT rule. For SNAT, the + // internal address is the original source address before NAT is done. For DNAT, the internal address is the translated + // destination address after NAT is done. For REFLEXIVE, to egress traffic, the internal address is the original + // source address before NAT is done; to ingress traffic, the internal address is the translated destination address + // after NAT is done. + // * MATCH_EXTERNAL_ADDRESS indicates the firewall will be applied to external address of a NAT rule. For SNAT, the + // external address is the translated source address after NAT is done. For DNAT, the external address is the original + // destination address before NAT is done. For REFLEXIVE, to egress traffic, the external address is the translated + // internal address after NAT is done; to ingress traffic, the external address is the original destination address + // before NAT is done. + // * BYPASS firewall stage will be skipped. 
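+ //
+ // An illustrative DNAT rule forwarding HTTPS to an internal VM (all addresses and the port
+ // below are placeholder values):
+ //
+ //	dnat := NsxtNatRule{
+ //		Name:              "dnat-web",
+ //		Enabled:           true,
+ //		RuleType:          "DNAT",
+ //		ExternalAddresses: "203.0.113.10",
+ //		InternalAddresses: "192.168.1.10",
+ //		DnatExternalPort:  "443",
+ //	}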
+ FirewallMatch string `json:"firewallMatch,omitempty"` + // Priority helps to select rule with highest priority if an address has multiple NAT rules. A lower value means a + // higher precedence for this rule. Maximum value 2147481599 + Priority *int `json:"priority,omitempty"` + + // Version of NAT rule. Must not be set when creating. + Version *struct { + // Version is incremented after each update + Version *int `json:"version,omitempty"` + } `json:"version,omitempty"` +} + +// NsxtIpSecVpnTunnel defines the IPsec VPN Tunnel configuration +// Some of the fields like AuthenticationMode and ConnectorInitiationMode are meant for future, because they have only +// one default value at the moment. +type NsxtIpSecVpnTunnel struct { + // ID unique for IPsec VPN tunnel. On updates, the ID is required for the tunnel, while for create a new ID will be + // generated. + ID string `json:"id,omitempty"` + // Name for the IPsec VPN Tunnel + Name string `json:"name"` + // Description for the IPsec VPN Tunnel + Description string `json:"description,omitempty"` + // Enabled describes whether the IPsec VPN Tunnel is enabled or not. The default is true. + Enabled bool `json:"enabled"` + // LocalEndpoint which corresponds to the Edge Gateway the IPsec VPN Tunnel is being configured on. Local Endpoint + // requires an IP. That IP must be sub-allocated to the edge gateway + LocalEndpoint NsxtIpSecVpnTunnelLocalEndpoint `json:"localEndpoint"` + // RemoteEndpoint corresponds to the device on the remote site terminating the VPN tunnel + RemoteEndpoint NsxtIpSecVpnTunnelRemoteEndpoint `json:"remoteEndpoint"` + // PreSharedKey is key used for authentication. It must be the same on the other end of IPsec VPN Tunnel + PreSharedKey string `json:"preSharedKey"` + // SecurityType is the security type used for the IPsec VPN Tunnel. If nothing is specified, this will be set to + // DEFAULT in which the default settings in NSX will be used. For custom settings, one should use the + // NsxtIpSecVpnTunnelSecurityProfile and UpdateTunnelConnectionProperties(), GetTunnelConnectionProperties() endpoint to + // specify custom settings. The security type will then appropriately reflect itself as CUSTOM. + // To revert back to system default, this field must be set to "DEFAULT" + SecurityType string `json:"securityType,omitempty"` + // Logging sets whether logging for the tunnel is enabled or not. The default is false. + Logging bool `json:"logging"` + + // AuthenticationMode is authentication mode this IPsec tunnel will use to authenticate with the peer endpoint. The + // default is a pre-shared key (PSK). + // * PSK - A known key is shared between each site before the tunnel is established. + // * CERTIFICATE - Incoming connections are required to present an identifying digital certificate, which VCD verifies + // has been signed by a trusted certificate authority. + // + // Note. Up to version 10.3 VCD only supports PSK + AuthenticationMode string `json:"authenticationMode,omitempty"` + + // ConnectorInitiationMode is the mode used by the local endpoint to establish an IKE Connection with the remote site. + // The default is INITIATOR. + // Possible values are: INITIATOR , RESPOND_ONLY , ON_DEMAND + // + // Note. Up to version 10.3 VCD only supports INITIATOR + ConnectorInitiationMode string `json:"connectorInitiationMode,omitempty"` + + // Version of IPsec VPN Tunnel configuration. 
Must not be set when creating, but required for updates + Version *struct { + // Version is incremented after each update + Version *int `json:"version,omitempty"` + } `json:"version,omitempty"` +} + +// NsxtIpSecVpnTunnelLocalEndpoint which corresponds to the Edge Gateway the IPsec VPN Tunnel is being configured on. +// Local Endpoint requires an IP. That IP must be sub-allocated to the edge gateway +type NsxtIpSecVpnTunnelLocalEndpoint struct { + // LocalId is the optional local identifier for the endpoint. It is usually the same as LocalAddress + LocalId string `json:"localId,omitempty"` + // LocalAddress is the IPv4 Address for the endpoint. This has to be a sub-allocated IP on the Edge Gateway. This is + // required + LocalAddress string `json:"localAddress"` + // LocalNetworks is the list of local networks. These must be specified in normal Network CIDR format. At least one is + // required + LocalNetworks []string `json:"localNetworks,omitempty"` +} + +// NsxtIpSecVpnTunnelRemoteEndpoint corresponds to the device on the remote site terminating the VPN tunnel +type NsxtIpSecVpnTunnelRemoteEndpoint struct { + // RemoteId is needed to uniquely identify the peer site. If this tunnel is using PSK authentication, + // the Remote ID is the public IP Address of the remote device terminating the VPN Tunnel. When NAT is configured on + // the Remote ID, enter the private IP Address of the Remote Site. If the remote ID is not set, VCD will set the + // remote ID to the remote address. + RemoteId string `json:"remoteId,omitempty"` + // RemoteAddress is IPv4 Address of the remote endpoint on the remote site. This is the Public IPv4 Address of the + // remote device terminating the IPsec VPN Tunnel connection. This is required + RemoteAddress string `json:"remoteAddress"` + // RemoteNetworks is the list of remote networks. These must be specified in normal Network CIDR format. + // Specifying no value is interpreted as 0.0.0.0/0 + RemoteNetworks []string `json:"remoteNetworks,omitempty"` +} + +// NsxtIpSecVpnTunnelStatus helps to read IPsec VPN Tunnel Status +type NsxtIpSecVpnTunnelStatus struct { + // TunnelStatus gives the overall IPsec VPN Tunnel Status. If IKE is properly set and the tunnel is up, the tunnel + // status will be UP + TunnelStatus string `json:"tunnelStatus"` + IkeStatus struct { + // IkeServiceStatus status for the actual IKE Session for the given tunnel. + IkeServiceStatus string `json:"ikeServiceStatus"` + // FailReason contains more details of failure if the IKE service is not UP + FailReason string `json:"failReason"` + } `json:"ikeStatus"` +} + +// NsxtIpSecVpnTunnelSecurityProfile specifies the given security profile/connection properties of a given IP Sec VPN +// Tunnel, such as Dead Probe Interval and IKE settings. If a security type is set to 'CUSTOM', then ike, tunnel, and/or +// dpd configurations can be specified. Otherwise, those fields are read only and are set to the values based on the +// specific security type. +type NsxtIpSecVpnTunnelSecurityProfile struct { + // SecurityType is the security type used for the IPSec Tunnel. If nothing is specified, this will be set to DEFAULT + // in which the default settings in NSX will be used. If CUSTOM is specified, then IKE, Tunnel, and DPD + // configurations can be set. + // To "RESET" configuration to DEFAULT, the NsxtIpSecVpnTunnel.SecurityType field should be changed instead of this + SecurityType string `json:"securityType,omitempty"` + // IkeConfiguration is the IKE Configuration to be used for the tunnel. 
If nothing is explicitly set, the system + // defaults will be used. + IkeConfiguration NsxtIpSecVpnTunnelProfileIkeConfiguration `json:"ikeConfiguration,omitempty"` + // TunnelConfiguration contains parameters such as encryption algorithm to be used. If nothing is explicitly set, + // the system defaults will be used. + TunnelConfiguration NsxtIpSecVpnTunnelProfileTunnelConfiguration `json:"tunnelConfiguration,omitempty"` + // DpdConfiguration contains Dead Peer Detection configuration. If nothing is explicitly set, the system defaults + // will be used. + DpdConfiguration NsxtIpSecVpnTunnelProfileDpdConfiguration `json:"dpdConfiguration,omitempty"` +} + +// NsxtIpSecVpnTunnelProfileIkeConfiguration is the Internet Key Exchange (IKE) profiles provide information about the +// algorithms that are used to authenticate, encrypt, and establish a shared secret between network sites when you +// establish an IKE tunnel. +// +// Note. While quite a few fields accepts a []string it actually supports single values only. +type NsxtIpSecVpnTunnelProfileIkeConfiguration struct { + // IkeVersion IKE Protocol Version to use. + // The default is IKE_V2. + // + // Possible values are: IKE_V1 , IKE_V2 , IKE_FLEX + IkeVersion string `json:"ikeVersion"` + // EncryptionAlgorithms contains list of Encryption algorithms for IKE. This is used during IKE negotiation. + // Default is AES_128. + // + // Possible values are: AES_128 , AES_256 , AES_GCM_128 , AES_GCM_192 , AES_GCM_256 + EncryptionAlgorithms []string `json:"encryptionAlgorithms"` + // DigestAlgorithms contains list of Digest algorithms - secure hashing algorithms to use during the IKE negotiation. + // + // Default is SHA2_256. + // + // Possible values are: SHA1 , SHA2_256 , SHA2_384 , SHA2_512 + DigestAlgorithms []string `json:"digestAlgorithms"` + // DhGroups contains list of Diffie-Hellman groups to be used if Perfect Forward Secrecy is enabled. These are + // cryptography schemes that allows the peer site and the edge gateway to establish a shared secret over an insecure + // communications channel + // + // Default is GROUP14. + // + // Possible values are: GROUP2, GROUP5, GROUP14, GROUP15, GROUP16, GROUP19, GROUP20, GROUP21 + DhGroups []string `json:"dhGroups"` + // SaLifeTime is the Security Association life time in seconds. It is number of seconds before the IPsec tunnel needs + // to reestablish + // + // Default is 86400 seconds (1 day). + SaLifeTime *int `json:"saLifeTime"` +} + +// NsxtIpSecVpnTunnelProfileTunnelConfiguration adjusts IPsec VPN Tunnel settings +// +// Note. While quite a few fields accepts a []string it actually supports single values only. +type NsxtIpSecVpnTunnelProfileTunnelConfiguration struct { + // PerfectForwardSecrecyEnabled enabled or disabled. PFS (Perfect Forward Secrecy) ensures the same key will not be + // generated and used again, and because of this, the VPN peers negotiate a new Diffie-Hellman key exchange. This + // would ensure if a hacker\criminal was to compromise the private key, they would only be able to access data in + // transit protected by that key. Any future data will not be compromised, as future data would not be associated + // with that compromised key. Both sides of the VPN must be able to support PFS in order for PFS to work. + // + // The default value is true. + PerfectForwardSecrecyEnabled bool `json:"perfectForwardSecrecyEnabled"` + // DfPolicy Policy for handling defragmentation bit. The default is COPY. 
+ // + // Possible values are: COPY, CLEAR + // * COPY Copies the defragmentation bit from the inner IP packet to the outer packet. + // * CLEAR Ignores the defragmentation bit present in the inner packet. + DfPolicy string `json:"dfPolicy"` + + // EncryptionAlgorithms contains list of Encryption algorithms to use in IPSec tunnel establishment. + // Default is AES_GCM_128. + // * NO_ENCRYPTION_AUTH_AES_GMAC_XX (XX is 128, 192, 256) enables authentication on input data without encryption. + // If one of these options is used, digest algorithm should be empty. + // + // Possible values are: AES_128, AES_256, AES_GCM_128, AES_GCM_192, AES_GCM_256, NO_ENCRYPTION_AUTH_AES_GMAC_128, + // NO_ENCRYPTION_AUTH_AES_GMAC_192, NO_ENCRYPTION_AUTH_AES_GMAC_256, NO_ENCRYPTION + EncryptionAlgorithms []string `json:"encryptionAlgorithms"` + + // DigestAlgorithms contains list of Digest algorithms to be used for message digest. The default digest algorithm is + // implicitly covered by default encryption algorithm AES_GCM_128. + // + // Possible values are: SHA1 , SHA2_256 , SHA2_384 , SHA2_512 + // Note. Only one value can be set inside the slice + DigestAlgorithms []string `json:"digestAlgorithms"` + + // DhGroups contains list of Diffie-Hellman groups to be used is PFS is enabled. Default is GROUP14. + // + // Possible values are: GROUP2, GROUP5, GROUP14, GROUP15, GROUP16, GROUP19, GROUP20, GROUP21 + // Note. Only one value can be set inside the slice + DhGroups []string `json:"dhGroups"` + + // SaLifeTime is the Security Association life time in seconds. + // + // Default is 3600 seconds. + SaLifeTime *int `json:"saLifeTime"` +} + +// NsxtIpSecVpnTunnelProfileDpdConfiguration specifies the Dead Peer Detection Profile. This configurations determines +// the number of seconds to wait in time between probes to detect if an IPSec peer is alive or not. The default value +// for the DPD probe interval is 60 seconds. +type NsxtIpSecVpnTunnelProfileDpdConfiguration struct { + // ProbeInternal is value of the probe interval in seconds. This defines a periodic interval for DPD probes. The + // minimum is 3 seconds and the maximum is 60 seconds. + ProbeInterval int `json:"probeInterval"` +} + +// NsxtAlbController helps to integrate VMware Cloud Director with NSX-T Advanced Load Balancer deployment. +// Controller instances are registered with VMware Cloud Director instance. Controller instances serve as a central +// control plane for the load-balancing services provided by NSX-T Advanced Load Balancer. +// To configure an NSX-T ALB one needs to supply AVI Controller endpoint, credentials and license to be used. +type NsxtAlbController struct { + // ID holds URN for load balancer controller (e.g. urn:vcloud:loadBalancerController:aa23ef66-ba32-48b2-892f-7acdffe4587e) + ID string `json:"id,omitempty"` + // Name as shown in VCD + Name string `json:"name"` + // Description as shown in VCD + Description string `json:"description,omitempty"` + // Url of ALB controller + Url string `json:"url"` + // Username of user + Username string `json:"username"` + // Password (will not be returned on read) + Password string `json:"password,omitempty"` + // LicenseType By enabling this feature, the provider acknowledges that they have independently licensed the + // enterprise version of the NSX AVI LB. + // Possible options: 'BASIC', 'ENTERPRISE' + LicenseType string `json:"licenseType,omitempty"` + // Version of ALB (e.g. 20.1.3). 
Read-only + Version string `json:"version,omitempty"` +} + +// NsxtAlbImportableCloud allows user to list importable NSX-T ALB Clouds. Each importable cloud can only be imported +// once. It has a flag AlreadyImported which hints if it is already consumed or not. +type NsxtAlbImportableCloud struct { + // ID (e.g. 'cloud-43726181-f73e-41f2-bf1d-8a9609502586') + ID string `json:"id"` + + DisplayName string `json:"displayName"` + // AlreadyImported shows if this ALB Cloud is already imported + AlreadyImported bool `json:"alreadyImported"` + + // NetworkPoolRef contains a reference to NSX-T network pool + NetworkPoolRef OpenApiReference `json:"networkPoolRef"` + + // TransportZoneName contains transport zone name + TransportZoneName string `json:"transportZoneName"` +} + +// NsxtAlbCloud helps to use the virtual infrastructure provided by NSX Advanced Load Balancer, register NSX-T Cloud +// instances with VMware Cloud Director by consuming NsxtAlbImportableCloud. +type NsxtAlbCloud struct { + // ID (e.g. 'urn:vcloud:loadBalancerCloud:947ea2ba-e448-4249-91f7-1432b3d2fcbf') + ID string `json:"id,omitempty"` + Status string `json:"status,omitempty"` + // Name of NSX-T ALB Cloud + Name string `json:"name"` + // Description of NSX-T ALB Cloud + Description string `json:"description,omitempty"` + // LoadBalancerCloudBacking uniquely identifies a Load Balancer Cloud configured within a Load Balancer Controller. At + // the present, VCD only supports NSX-T Clouds configured within an NSX-ALB Controller deployment. + LoadBalancerCloudBacking NsxtAlbCloudBacking `json:"loadBalancerCloudBacking"` + // NetworkPoolRef for the Network Pool associated with this Cloud + NetworkPoolRef *OpenApiReference `json:"networkPoolRef"` + // HealthStatus contains status of the Load Balancer Cloud. Possible values are: + // UP - The cloud is healthy and ready to enable Load Balancer for an Edge Gateway. + // DOWN - The cloud is in a failure state. Enabling Load balancer on an Edge Gateway may not be possible. + // RUNNING - The cloud is currently processing. An example is if it's enabling a Load Balancer for an Edge Gateway. + // UNAVAILABLE - The cloud is unavailable. + // UNKNOWN - The cloud state is unknown. + HealthStatus string `json:"healthStatus,omitempty"` + // DetailedHealthMessage contains detailed message on the health of the Cloud. + DetailedHealthMessage string `json:"detailedHealthMessage,omitempty"` +} + +// NsxtAlbCloudBacking is embedded into NsxtAlbCloud +type NsxtAlbCloudBacking struct { + // BackingId is the ID of NsxtAlbImportableCloud + BackingId string `json:"backingId"` + // BackingType contains type of ALB (The only supported now is 'NSXALB_NSXT') + BackingType string `json:"backingType,omitempty"` + // LoadBalancerControllerRef contains reference to NSX-T ALB Controller + LoadBalancerControllerRef OpenApiReference `json:"loadBalancerControllerRef"` +} + +// NsxtAlbServiceEngineGroup provides virtual service management capabilities for tenants. This entity can be created +// by referencing a backing importable service engine group - NsxtAlbImportableServiceEngineGroups. +// +// A service engine group is an isolation domain that also defines shared service engine properties, such as size, +// network access, and failover. Resources in a service engine group can be used for different virtual services, +// depending on your tenant needs. These resources cannot be shared between different service engine groups. 
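+//
+// An illustrative DEDICATED Service Engine Group definition that consumes an importable backing
+// group (the backing ID and cloud URN reuse the sample values shown in the surrounding comments
+// and are placeholders):
+//
+//	seGroup := NsxtAlbServiceEngineGroup{
+//		Name:            "dedicated-seg",
+//		ReservationType: "DEDICATED",
+//		ServiceEngineGroupBacking: ServiceEngineGroupBacking{
+//			BackingId:            "serviceenginegroup-b633f16f-2733-4bf5-b552-3a6c4949caa4",
+//			LoadBalancerCloudRef: &OpenApiReference{ID: "urn:vcloud:loadBalancerCloud:947ea2ba-e448-4249-91f7-1432b3d2fcbf"},
+//		},
+//	}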
+type NsxtAlbServiceEngineGroup struct { + // ID of the Service Engine Group + ID string `json:"id,omitempty"` + // Name of the Service Engine Group + Name string `json:"name"` + // Description of the Service Engine Group + Description string `json:"description"` + // ServiceEngineGroupBacking holds backing details that uniquely identifies a Load Balancer Service Engine Group + // configured within a load balancer cloud. + ServiceEngineGroupBacking ServiceEngineGroupBacking `json:"serviceEngineGroupBacking"` + // HaMode defines High Availability Mode for Service Engine Group + // * ELASTIC_N_PLUS_M_BUFFER - Service Engines will scale out to N active nodes with M nodes as buffer. + // * ELASTIC_ACTIVE_ACTIVE - Active-Active with scale out. + // * LEGACY_ACTIVE_STANDBY - Traditional single Active-Standby configuration + HaMode string `json:"haMode,omitempty"` + // ReservationType can be `DEDICATED` or `SHARED` + // * DEDICATED - Dedicated to a single Edge Gateway and can only be assigned to a single Edge Gateway + // * SHARED - Shared between multiple Edge Gateways. Can be assigned to multiple Edge Gateways + ReservationType string `json:"reservationType"` + // MaxVirtualServices holds maximum number of virtual services supported on the Load Balancer Service Engine Group + MaxVirtualServices *int `json:"maxVirtualServices,omitempty"` + // NumDeployedVirtualServices shows number of virtual services currently deployed on the Load Balancer Service Engine + // Group + NumDeployedVirtualServices *int `json:"numDeployedVirtualServices,omitempty"` + // ReservedVirtualServices holds number of virtual services already reserved on the Load Balancer Service Engine Group. + // This value is the sum of the guaranteed virtual services given to Edge Gateways assigned to the Load Balancer + // Service Engine Group. + ReservedVirtualServices *int `json:"reservedVirtualServices,omitempty"` + // OverAllocated indicates whether the maximum number of virtual services supported on the Load Balancer Service + // Engine Group has been surpassed by the current number of reserved virtual services. + OverAllocated *bool `json:"overAllocated,omitempty"` +} + +type ServiceEngineGroupBacking struct { + BackingId string `json:"backingId"` + BackingType string `json:"backingType,omitempty"` + LoadBalancerCloudRef *OpenApiReference `json:"loadBalancerCloudRef"` +} + +// NsxtAlbImportableServiceEngineGroups provides capability to list all Importable Service Engine Groups available in +// ALB Controller so that they can be consumed by NsxtAlbServiceEngineGroup +// +// Note. The API does not return Importable Service Engine Group once it is consumed. +type NsxtAlbImportableServiceEngineGroups struct { + // ID (e.g. 'serviceenginegroup-b633f16f-2733-4bf5-b552-3a6c4949caa4') + ID string `json:"id"` + // DisplayName is the name of + DisplayName string `json:"displayName"` + // HaMode (e.g. 'ELASTIC_N_PLUS_M_BUFFER') + HaMode string `json:"haMode"` +} + +// NsxtAlbConfig describes Load Balancer Service configuration on an NSX-T Edge Gateway +type NsxtAlbConfig struct { + // Enabled is a mandatory flag indicating whether Load Balancer Service is enabled or not + Enabled bool `json:"enabled"` + // LicenseType of the backing Load Balancer Cloud. + // * BASIC - Basic edition of the NSX Advanced Load Balancer. + // * ENTERPRISE - Full featured edition of the NSX Advanced Load Balancer. 
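+ //
+ // An illustrative NsxtAlbConfig enabling load balancing on an Edge Gateway with the BASIC
+ // license (the cloud reference ID is a placeholder):
+ //
+ //	albConfig := NsxtAlbConfig{
+ //		Enabled:              true,
+ //		LicenseType:          "BASIC",
+ //		LoadBalancerCloudRef: &OpenApiReference{ID: "urn:vcloud:loadBalancerCloud:947ea2ba-e448-4249-91f7-1432b3d2fcbf"},
+ //	}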
+ LicenseType string `json:"licenseType,omitempty"` + // LoadBalancerCloudRef + LoadBalancerCloudRef *OpenApiReference `json:"loadBalancerCloudRef,omitempty"` + + // ServiceNetworkDefinition in Gateway CIDR format which will be used by Load Balancer service. All the load balancer + // service engines associated with the Service Engine Group will be attached to this network. The subnet prefix length + // must be 25. If nothing is set, the default is 192.168.255.1/25. Default CIDR can be configured. This field cannot + // be updated. + ServiceNetworkDefinition string `json:"serviceNetworkDefinition,omitempty"` +} + +// NsxtAlbServiceEngineGroupAssignment configures Service Engine Group assignments to Edge Gateway. The only mandatory +// fields are `GatewayRef` and `ServiceEngineGroupRef`. `MinVirtualServices` and `MaxVirtualServices` are only available +// for SHARED Service Engine Groups. +type NsxtAlbServiceEngineGroupAssignment struct { + ID string `json:"id,omitempty"` + // GatewayRef contains reference to Edge Gateway + GatewayRef *OpenApiReference `json:"gatewayRef"` + // ServiceEngineGroupRef contains a reference to Service Engine Group + ServiceEngineGroupRef *OpenApiReference `json:"serviceEngineGroupRef"` + // GatewayOrgRef optional Org reference for gateway + GatewayOrgRef *OpenApiReference `json:"gatewayOrgRef,omitempty"` + // GatewayOwnerRef can be a VDC or VDC group + GatewayOwnerRef *OpenApiReference `json:"gatewayOwnerRef,omitempty"` + MaxVirtualServices *int `json:"maxVirtualServices,omitempty"` + MinVirtualServices *int `json:"minVirtualServices,omitempty"` + // NumDeployedVirtualServices is a read only value + NumDeployedVirtualServices int `json:"numDeployedVirtualServices,omitempty"` +} + +// NsxtAlbPool defines configuration of a single NSX-T ALB Pool. Pools maintain the list of servers assigned to them and +// perform health monitoring, load balancing, persistence. A pool may only be used or referenced by only one virtual +// service at a time. +type NsxtAlbPool struct { + ID string `json:"id,omitempty"` + // Name is mandatory + Name string `json:"name"` + // Description is optional + Description string `json:"description,omitempty"` + + // GatewayRef is mandatory and associates NSX-T Edge Gateway with this Load Balancer Pool. + GatewayRef OpenApiReference `json:"gatewayRef"` + + // Enabled defines if the Pool is enabled + Enabled *bool `json:"enabled,omitempty"` + + // Algorithm for choosing a member within the pools list of available members for each new connection. + // Default value is LEAST_CONNECTIONS + // Supported algorithms are: + // * LEAST_CONNECTIONS + // * ROUND_ROBIN + // * CONSISTENT_HASH (uses Source IP Address hash) + // * FASTEST_RESPONSE + // * LEAST_LOAD + // * FEWEST_SERVERS + // * RANDOM + // * FEWEST_TASKS + // * CORE_AFFINITY + Algorithm string `json:"algorithm,omitempty"` + + // DefaultPort defines destination server port used by the traffic sent to the member. + DefaultPort *int `json:"defaultPort,omitempty"` + + // GracefulTimeoutPeriod sets maximum time (in minutes) to gracefully disable a member. Virtual service waits for the + // specified time before terminating the existing connections to the pool members that are disabled. + // + // Special values: 0 represents Immediate, -1 represents Infinite. + GracefulTimeoutPeriod *int `json:"gracefulTimeoutPeriod,omitempty"` + + // PassiveMonitoringEnabled sets if client traffic should be used to check if pool member is up or down. 
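+ //
+ // An illustrative pool with one member and an HTTP health monitor (the gateway URN, addresses
+ // and ports are placeholder values):
+ //
+ //	defaultPort := 80
+ //	pool := NsxtAlbPool{
+ //		Name:           "web-pool",
+ //		GatewayRef:     OpenApiReference{ID: "urn:vcloud:gateway:71df3e4b-6da9-404d-8e44-0865751c1c38"},
+ //		Algorithm:      "LEAST_CONNECTIONS",
+ //		DefaultPort:    &defaultPort,
+ //		Members:        []NsxtAlbPoolMember{{Enabled: true, IpAddress: "192.168.1.10", Port: 8080}},
+ //		HealthMonitors: []NsxtAlbPoolHealthMonitor{{Type: "HTTP"}},
+ //	}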
+ PassiveMonitoringEnabled *bool `json:"passiveMonitoringEnabled,omitempty"` + + // HealthMonitors check member servers health. It can be monitored by using one or more health monitors. Active + // monitors generate synthetic traffic and mark a server up or down based on the response. + HealthMonitors []NsxtAlbPoolHealthMonitor `json:"healthMonitors,omitempty"` + + // Members field defines list of destination servers which are used by the Load Balancer Pool to direct load balanced + // traffic. + Members []NsxtAlbPoolMember `json:"members,omitempty"` + + // CaCertificateRefs point to root certificates to use when validating certificates presented by the pool members. + CaCertificateRefs []OpenApiReference `json:"caCertificateRefs,omitempty"` + + // CommonNameCheckEnabled specifies whether to check the common name of the certificate presented by the pool member. + // This cannot be enabled if no caCertificateRefs are specified. + CommonNameCheckEnabled *bool `json:"commonNameCheckEnabled,omitempty"` + + // DomainNames holds a list of domain names which will be used to verify the common names or subject alternative + // names presented by the pool member certificates. It is performed only when common name check + // (CommonNameCheckEnabled) is enabled. If common name check is enabled, but domain names are not specified then the + // incoming host header will be used to check the certificate. + DomainNames []string `json:"domainNames,omitempty"` + + // PersistenceProfile of a Load Balancer Pool. Persistence profile will ensure that the same user sticks to the same + // server for a desired duration of time. If the persistence profile is unmanaged by Cloud Director, updates that + // leave the values unchanged will continue to use the same unmanaged profile. Any changes made to the persistence + // profile will cause Cloud Director to switch the pool to a profile managed by Cloud Director. + PersistenceProfile *NsxtAlbPoolPersistenceProfile `json:"persistenceProfile,omitempty"` + + // MemberCount is a read only value that reports number of members added + MemberCount int `json:"memberCount,omitempty"` + + // EnabledMemberCount is a read only value that reports number of enabled members + EnabledMemberCount int `json:"enabledMemberCount,omitempty"` + + // UpMemberCount is a read only value that reports number of members that are serving traffic + UpMemberCount int `json:"upMemberCount,omitempty"` + + // HealthMessage shows a pool health status (e.g. "The pool is unassigned.") + HealthMessage string `json:"healthMessage,omitempty"` + + // VirtualServiceRefs holds list of Load Balancer Virtual Services associated with this Load balancer Pool. + VirtualServiceRefs []OpenApiReference `json:"virtualServiceRefs,omitempty"` +} + +// NsxtAlbPoolHealthMonitor checks member servers health. Active monitor generates synthetic traffic and mark a server +// up or down based on the response. +type NsxtAlbPoolHealthMonitor struct { + Name string `json:"name,omitempty"` + // SystemDefined is a boolean value + SystemDefined bool `json:"systemDefined,omitempty"` + // Type + // * HTTP - HTTP request/response is used to validate health. + // * HTTPS - Used against HTTPS encrypted web servers to validate health. + // * TCP - TCP connection is used to validate health. + // * UDP - A UDP datagram is used to validate health. + // * PING - An ICMP ping is used to validate health. 
+ Type string `json:"type"` +} + +// NsxtAlbPoolMember defines a single destination server which is used by the Load Balancer Pool to direct load balanced +// traffic. +type NsxtAlbPoolMember struct { + // Enabled defines if member is enabled (will receive incoming requests) or not + Enabled bool `json:"enabled"` + // IpAddress of the Load Balancer Pool member. + IpAddress string `json:"ipAddress"` + + // Port number of the Load Balancer Pool member. If unset, the port that the client used to connect will be used. + Port int `json:"port,omitempty"` + + // Ratio of selecting eligible servers in the pool. + Ratio *int `json:"ratio,omitempty"` + + // MarkedDownBy gives the names of the health monitors that marked the member as down when it is DOWN. If a monitor + // cannot be determined, the value will be UNKNOWN. + MarkedDownBy []string `json:"markedDownBy,omitempty"` + + // HealthStatus of the pool member. Possible values are: + // * UP - The member is operational + // * DOWN - The member is down + // * DISABLED - The member is disabled + // * UNKNOWN - The state is unknown + HealthStatus string `json:"healthStatus,omitempty"` + + // DetailedHealthMessage contains non-localized detailed message on the health of the pool member. + DetailedHealthMessage string `json:"detailedHealthMessage,omitempty"` +} + +// NsxtAlbPoolPersistenceProfile holds Persistence Profile of a Load Balancer Pool. Persistence profile will ensure that +// the same user sticks to the same server for a desired duration of time. If the persistence profile is unmanaged by +// Cloud Director, updates that leave the values unchanged will continue to use the same unmanaged profile. Any changes +// made to the persistence profile will cause Cloud Director to switch the pool to a profile managed by Cloud Director. +type NsxtAlbPoolPersistenceProfile struct { + // Name field is tricky. It remains empty in some case, but if it is sent it can become computed. + // (e.g. setting 'CUSTOM_HTTP_HEADER' results in value being + // 'VCD-LoadBalancer-3510eae9-53bb-49f1-b7aa-7aedf5ce3a77-CUSTOM_HTTP_HEADER') + Name string `json:"name,omitempty"` + + // Type of persistence strategy to use. Supported values are: + // * CLIENT_IP - The clients IP is used as the identifier and mapped to the server + // * HTTP_COOKIE - Load Balancer inserts a cookie into HTTP responses. Cookie name must be provided as value + // * CUSTOM_HTTP_HEADER - Custom, static mappings of header values to specific servers are used. Header name must be + // provided as value + // * APP_COOKIE - Load Balancer reads existing server cookies or URI embedded data such as JSessionID. Cookie name + // must be provided as value + // * TLS - Information is embedded in the client's SSL/TLS ticket ID. This will use default system profile + // System-Persistence-TLS + Type string `json:"type,omitempty"` + + // Value of attribute based on selected persistence type. + // This is required for HTTP_COOKIE, CUSTOM_HTTP_HEADER and APP_COOKIE persistence types. + // + // HTTP_COOKIE, APP_COOKIE must have cookie name set as the value and CUSTOM_HTTP_HEADER must have header name set as + // the value. 
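+ //
+ // An illustrative cookie based persistence profile (the cookie name is a placeholder):
+ //
+ //	persistence := NsxtAlbPoolPersistenceProfile{
+ //		Type:  "HTTP_COOKIE",
+ //		Value: "MyAppCookie",
+ //	}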
+ Value string `json:"value,omitempty"` +} + +// NsxtAlbVirtualService combines Load Balancer Pools with Service Engine Groups and exposes a virtual service on +// defined VIP (virtual IP address) while optionally allowing to use encrypted traffic +type NsxtAlbVirtualService struct { + ID string `json:"id,omitempty"` + + // Name contains meaningful name + Name string `json:"name,omitempty"` + + // Description is optional + Description string `json:"description,omitempty"` + + // Enabled defines if the virtual service is enabled to accept traffic + Enabled *bool `json:"enabled"` + + // ApplicationProfile sets protocol for load balancing by using NsxtAlbVirtualServiceApplicationProfile + ApplicationProfile NsxtAlbVirtualServiceApplicationProfile `json:"applicationProfile"` + + // GatewayRef contains NSX-T Edge Gateway reference + GatewayRef OpenApiReference `json:"gatewayRef"` + //LoadBalancerPoolRef contains Pool reference + LoadBalancerPoolRef OpenApiReference `json:"loadBalancerPoolRef"` + // ServiceEngineGroupRef points to service engine group (which must be assigned to NSX-T Edge Gateway) + ServiceEngineGroupRef OpenApiReference `json:"serviceEngineGroupRef"` + + // CertificateRef contains certificate reference if serving encrypted traffic + CertificateRef *OpenApiReference `json:"certificateRef,omitempty"` + + // ServicePorts define one or more ports (or port ranges) of the virtual service + ServicePorts []NsxtAlbVirtualServicePort `json:"servicePorts"` + + // VirtualIpAddress to be used for exposing this virtual service + VirtualIpAddress string `json:"virtualIpAddress"` + + // HealthStatus contains status of the Load Balancer Cloud. Possible values are: + // UP - The cloud is healthy and ready to enable Load Balancer for an Edge Gateway. + // DOWN - The cloud is in a failure state. Enabling Load balancer on an Edge Gateway may not be possible. + // RUNNING - The cloud is currently processing. An example is if it's enabling a Load Balancer for an Edge Gateway. + // UNAVAILABLE - The cloud is unavailable. + // UNKNOWN - The cloud state is unknown. + HealthStatus string `json:"healthStatus,omitempty"` + + // HealthMessage shows a pool health status (e.g. "The pool is unassigned.") + HealthMessage string `json:"healthMessage,omitempty"` + + // DetailedHealthMessage containes a more in depth health message + DetailedHealthMessage string `json:"detailedHealthMessage,omitempty"` +} + +// NsxtAlbVirtualServicePort port (or port ranges) of the virtual service +type NsxtAlbVirtualServicePort struct { + // PortStart is always required + PortStart *int `json:"portStart"` + // PortEnd is only required if a port range is specified. For single port cases PortStart is sufficient + PortEnd *int `json:"portEnd,omitempty"` + // SslEnabled defines if traffic is served as secure. CertificateRef must be specified in NsxtAlbVirtualService when + // true + SslEnabled *bool `json:"sslEnabled,omitempty"` + // TcpUdpProfile defines + TcpUdpProfile *NsxtAlbVirtualServicePortTcpUdpProfile `json:"tcpUdpProfile,omitempty"` +} + +// NsxtAlbVirtualServicePortTcpUdpProfile profile determines the type and settings of the network protocol that a +// subscribing virtual service will use. It sets a number of parameters, such as whether the virtual service is a TCP +// proxy versus a pass-through via fast path. A virtual service can have both TCP and UDP enabled, which is useful for +// protocols such as DNS or syslog. 
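+//
+// An illustrative HTTPS service port entry that uses a TCP proxy profile (the port and flags are
+// placeholder values):
+//
+//	portStart := 443
+//	sslOn := true
+//	svcPort := NsxtAlbVirtualServicePort{
+//		PortStart:     &portStart,
+//		SslEnabled:    &sslOn,
+//		TcpUdpProfile: &NsxtAlbVirtualServicePortTcpUdpProfile{SystemDefined: true, Type: "TCP_PROXY"},
+//	}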
+type NsxtAlbVirtualServicePortTcpUdpProfile struct { + SystemDefined bool `json:"systemDefined"` + // Type defines L4 or L4_TLS profiles: + // * TCP_PROXY (the only possible type when L4_TLS is used). Enabling TCP Proxy causes ALB to terminate an inbound + // connection from a client. Any application data from the client that is destined for a server is forwarded to that + // server over a new TCP connection. Separating (or proxying) the client-to-server connections enables ALB to provide + // enhanced security, such as TCP protocol sanitization or DoS mitigation. It also provides better client and server + // performance, such as maximizing client and server TCP MSS or window sizes independently and buffering server + // responses. One must use a TCP/UDP profile with the type set to Proxy for application profiles such as HTTP. + // + // * TCP_FAST_PATH profile does not proxy TCP connections - rather, it directly connects clients to the + // destination server and translates the client's destination virtual service address with the chosen destination + // server's IP address. The client's source IP address is still translated to the Service Engine address to ensure + // that server response traffic returns symmetrically. + // + // * UDP_FAST_PATH profile enables a virtual service to support UDP. Avi Vantage translates the client's destination + // virtual service address to the destination server and rewrites the client's source IP address to the Service + // Engine's address when forwarding the packet to the server. This ensures that server response traffic traverses + // symmetrically through the original SE. + Type string `json:"type"` +} + +// NsxtAlbVirtualServiceApplicationProfile sets protocol for load balancing. Type field defines possible options. +type NsxtAlbVirtualServiceApplicationProfile struct { + SystemDefined bool `json:"systemDefined,omitempty"` + // Type defines Traffic + // * HTTP + // * HTTPS (certificate reference is mandatory) + // * L4 + // * L4 TLS (certificate reference is mandatory) + Type string `json:"type"` +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/nsxv_types.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/nsxv_types.go new file mode 100644 index 000000000..0c0750429 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/nsxv_types.go @@ -0,0 +1,450 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package types + +import "encoding/xml" + +// FirewallConfigWithXml allows to enable/disable firewall on a specific edge gateway +// Reference: vCloud Director API for NSX Programming Guide +// https://code.vmware.com/docs/6900/vcloud-director-api-for-nsx-programming-guide +// +// Warning. It nests all firewall rules because Edge Gateway API is done so that if this data is not +// sent while enabling it would wipe all firewall rules. 
InnerXML type field is used with struct tag +//`innerxml` to prevent any manipulation of configuration and sending it verbatim +type FirewallConfigWithXml struct { + XMLName xml.Name `xml:"firewall"` + Enabled bool `xml:"enabled"` + DefaultPolicy FirewallDefaultPolicy `xml:"defaultPolicy"` + + // Each configuration change has a version number + Version string `xml:"version,omitempty"` + + // The below field has `innerxml` tag so that it is not processed but instead + // sent verbatim + FirewallRules InnerXML `xml:"firewallRules,omitempty"` + GlobalConfig InnerXML `xml:"globalConfig,omitempty"` +} + +// FirewallDefaultPolicy represent default rule +type FirewallDefaultPolicy struct { + LoggingEnabled bool `xml:"loggingEnabled"` + Action string `xml:"action"` +} + +// LbGeneralParamsWithXml allows to enable/disable load balancing capabilities on specific edge gateway +// Reference: vCloud Director API for NSX Programming Guide +// https://code.vmware.com/docs/6900/vcloud-director-api-for-nsx-programming-guide +// +// Warning. It nests all components (LbMonitor, LbPool, LbAppProfile, LbAppRule, LbVirtualServer) +// because Edge Gateway API is done so that if this data is not sent while enabling it would wipe +// all load balancer configurations. InnerXML type fields are used with struct tag `innerxml` to +// prevent any manipulation of configuration and sending it verbatim +type LbGeneralParamsWithXml struct { + XMLName xml.Name `xml:"loadBalancer"` + Enabled bool `xml:"enabled"` + AccelerationEnabled bool `xml:"accelerationEnabled"` + Logging *LbLogging `xml:"logging"` + + // This field is not used anywhere but needs to be passed through + EnableServiceInsertion bool `xml:"enableServiceInsertion"` + // Each configuration change has a version number + Version string `xml:"version,omitempty"` + + // The below fields have `innerxml` tag so that they are not processed but instead + // sent verbatim + VirtualServers []InnerXML `xml:"virtualServer,omitempty"` + Pools []InnerXML `xml:"pool,omitempty"` + AppProfiles []InnerXML `xml:"applicationProfile,omitempty"` + Monitors []InnerXML `xml:"monitor,omitempty"` + AppRules []InnerXML `xml:"applicationRule,omitempty"` +} + +// LbLogging represents logging configuration for load balancer +type LbLogging struct { + Enable bool `xml:"enable"` + LogLevel string `xml:"logLevel"` +} + +// InnerXML is meant to be used when unmarshaling a field into text rather than struct +// It helps to avoid missing out any fields which may not have been specified in the struct. 
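+//
+// For example, when only toggling the load balancer state, a previously read LbGeneralParamsWithXml
+// value (here called lbConfig, an assumed variable) can be re-submitted with its InnerXML fields
+// untouched:
+//
+//	lbConfig.Enabled = true
+//	// Pools, VirtualServers, AppProfiles, Monitors and AppRules keep their original
+//	// inner XML payloads, so re-submitting lbConfig does not wipe the existing settings.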
+type InnerXML struct { + Text string `xml:",innerxml"` +} + +// LbMonitor defines health check parameters for a particular type of network traffic +// Reference: vCloud Director API for NSX Programming Guide +// https://code.vmware.com/docs/6900/vcloud-director-api-for-nsx-programming-guide +type LbMonitor struct { + XMLName xml.Name `xml:"monitor"` + ID string `xml:"monitorId,omitempty"` + Type string `xml:"type"` + Interval int `xml:"interval,omitempty"` + Timeout int `xml:"timeout,omitempty"` + MaxRetries int `xml:"maxRetries,omitempty"` + Method string `xml:"method,omitempty"` + URL string `xml:"url,omitempty"` + Expected string `xml:"expected,omitempty"` + Name string `xml:"name,omitempty"` + Send string `xml:"send,omitempty"` + Receive string `xml:"receive,omitempty"` + Extension string `xml:"extension,omitempty"` +} + +type LbMonitors []LbMonitor + +// LbPool represents a load balancer server pool as per "vCloud Director API for NSX Programming Guide" +// Type: LBPoolHealthCheckType +// https://code.vmware.com/docs/6900/vcloud-director-api-for-nsx-programming-guide +type LbPool struct { + XMLName xml.Name `xml:"pool"` + ID string `xml:"poolId,omitempty"` + Name string `xml:"name"` + Description string `xml:"description,omitempty"` + Algorithm string `xml:"algorithm"` + AlgorithmParameters string `xml:"algorithmParameters,omitempty"` + Transparent bool `xml:"transparent"` + MonitorId string `xml:"monitorId,omitempty"` + Members LbPoolMembers `xml:"member,omitempty"` +} + +type LbPools []LbPool + +// LbPoolMember represents a single member inside LbPool +type LbPoolMember struct { + ID string `xml:"memberId,omitempty"` + Name string `xml:"name"` + IpAddress string `xml:"ipAddress"` + Weight int `xml:"weight,omitempty"` + MonitorPort int `xml:"monitorPort,omitempty"` + Port int `xml:"port"` + MaxConn int `xml:"maxConn,omitempty"` + MinConn int `xml:"minConn,omitempty"` + Condition string `xml:"condition,omitempty"` +} + +type LbPoolMembers []LbPoolMember + +// LbAppProfile represents a load balancer application profile as per "vCloud Director API for NSX +// Programming Guide" +// https://code.vmware.com/docs/6900/vcloud-director-api-for-nsx-programming-guide +type LbAppProfile struct { + XMLName xml.Name `xml:"applicationProfile"` + ID string `xml:"applicationProfileId,omitempty"` + Name string `xml:"name,omitempty"` + SslPassthrough bool `xml:"sslPassthrough"` + Template string `xml:"template,omitempty"` + HttpRedirect *LbAppProfileHttpRedirect `xml:"httpRedirect,omitempty"` + Persistence *LbAppProfilePersistence `xml:"persistence,omitempty"` + InsertXForwardedForHttpHeader bool `xml:"insertXForwardedFor"` + ServerSslEnabled bool `xml:"serverSslEnabled"` +} + +type LbAppProfiles []LbAppProfile + +// LbAppProfilePersistence defines persistence profile settings in LbAppProfile +type LbAppProfilePersistence struct { + XMLName xml.Name `xml:"persistence"` + Method string `xml:"method,omitempty"` + CookieName string `xml:"cookieName,omitempty"` + CookieMode string `xml:"cookieMode,omitempty"` + Expire int `xml:"expire,omitempty"` +} + +// LbAppProfileHttpRedirect defines http redirect settings in LbAppProfile +type LbAppProfileHttpRedirect struct { + XMLName xml.Name `xml:"httpRedirect"` + To string `xml:"to,omitempty"` +} + +// LbAppRule represents a load balancer application rule as per "vCloud Director API for NSX +// Programming Guide" +// https://code.vmware.com/docs/6900/vcloud-director-api-for-nsx-programming-guide +type LbAppRule struct { + XMLName xml.Name 
`xml:"applicationRule"` + ID string `xml:"applicationRuleId,omitempty"` + Name string `xml:"name,omitempty"` + Script string `xml:"script,omitempty"` +} + +type LbAppRules []LbAppRule + +// LbVirtualServer represents a load balancer virtual server as per "vCloud Director API for NSX +// Programming Guide" +// https://code.vmware.com/docs/6900/vcloud-director-api-for-nsx-programming-guide +type LbVirtualServer struct { + XMLName xml.Name `xml:"virtualServer"` + ID string `xml:"virtualServerId,omitempty"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` + Enabled bool `xml:"enabled"` + IpAddress string `xml:"ipAddress"` + Protocol string `xml:"protocol"` + Port int `xml:"port"` + AccelerationEnabled bool `xml:"accelerationEnabled"` + ConnectionLimit int `xml:"connectionLimit,omitempty"` + ConnectionRateLimit int `xml:"connectionRateLimit,omitempty"` + ApplicationProfileId string `xml:"applicationProfileId,omitempty"` + DefaultPoolId string `xml:"defaultPoolId,omitempty"` + ApplicationRuleIds []string `xml:"applicationRuleId,omitempty"` +} + +// EdgeNatRule contains shared structure for SNAT and DNAT rule configuration using +// NSX-V proxied edge gateway endpoint +// https://code.vmware.com/docs/6900/vcloud-director-api-for-nsx-programming-guide +type EdgeNatRule struct { + XMLName xml.Name `xml:"natRule"` + ID string `xml:"ruleId,omitempty"` + RuleType string `xml:"ruleType,omitempty"` + RuleTag string `xml:"ruleTag,omitempty"` + Action string `xml:"action"` + Vnic *int `xml:"vnic,omitempty"` + OriginalAddress string `xml:"originalAddress"` + TranslatedAddress string `xml:"translatedAddress"` + LoggingEnabled bool `xml:"loggingEnabled"` + Enabled bool `xml:"enabled"` + Description string `xml:"description,omitempty"` + Protocol string `xml:"protocol,omitempty"` + OriginalPort string `xml:"originalPort,omitempty"` + TranslatedPort string `xml:"translatedPort,omitempty"` + IcmpType string `xml:"icmpType,omitempty"` +} + +// EdgeFirewall holds data for creating firewall rule using proxied NSX-V API +// https://code.vmware.com/docs/6900/vcloud-director-api-for-nsx-programming-guide +type EdgeFirewallRule struct { + XMLName xml.Name `xml:"firewallRule" ` + ID string `xml:"id,omitempty"` + Name string `xml:"name,omitempty"` + RuleType string `xml:"ruleType,omitempty"` + RuleTag string `xml:"ruleTag,omitempty"` + Source EdgeFirewallEndpoint `xml:"source" ` + Destination EdgeFirewallEndpoint `xml:"destination"` + Application EdgeFirewallApplication `xml:"application"` + MatchTranslated *bool `xml:"matchTranslated,omitempty"` + Direction string `xml:"direction,omitempty"` + Action string `xml:"action,omitempty"` + Enabled bool `xml:"enabled"` + LoggingEnabled bool `xml:"loggingEnabled"` +} + +// EdgeFirewallEndpoint can contains slices of objects for source or destination in EdgeFirewall +type EdgeFirewallEndpoint struct { + Exclude bool `xml:"exclude"` + VnicGroupIds []string `xml:"vnicGroupId,omitempty"` + GroupingObjectIds []string `xml:"groupingObjectId,omitempty"` + IpAddresses []string `xml:"ipAddress,omitempty"` +} + +// EdgeFirewallApplication Wraps []EdgeFirewallApplicationService for multiple protocol/port specification +type EdgeFirewallApplication struct { + ID string `xml:"applicationId,omitempty"` + Services []EdgeFirewallApplicationService `xml:"service,omitempty"` +} + +// EdgeFirewallApplicationService defines port/protocol details for one service in EdgeFirewallRule +type EdgeFirewallApplicationService struct { + Protocol string 
`xml:"protocol,omitempty"` + Port string `xml:"port,omitempty"` + SourcePort string `xml:"sourcePort,omitempty"` +} + +// EdgeIpSet defines a group of IP addresses that you can add as the source or destination in a +// firewall rule or in DHCP relay configuration. The object itself has more fields in API response, +// however vCD UI only uses the below mentioned. It looks as if the other fields are used in NSX +// internally and are simply proxied back. +// +// Note. Only advanced edge gateways support IP sets +type EdgeIpSet struct { + XMLName xml.Name `xml:"ipset"` + // ID holds composite ID of IP set which is formatted as + // 'f9daf2da-b4f9-4921-a2f4-d77a943a381c:ipset-4' where the first segment before colon is vDC id + // and the second one is IP set ID + ID string `xml:"objectId,omitempty"` + // Name is mandatory and must be unique + Name string `xml:"name"` + // Description - optional + Description string `xml:"description,omitempty"` + // IPAddresses is a mandatory field with comma separated values. The API is known to re-order + // data after submiting and may shuffle components even if re-submitted as it was return from + // API itself + // (eg: "192.168.200.1,192.168.200.1/24,192.168.200.1-192.168.200.24") + IPAddresses string `xml:"value"` + // InheritanceAllowed defines visibility at underlying scopes + InheritanceAllowed *bool `xml:"inheritanceAllowed"` + // Revision is a "version" of IP set configuration. During read current revision is being + // returned and when update is performed this latest version must be sent as it validates if no + // updates ocurred in between. When not the latest version is being sent during update one can + // expect similar error response from API: "The object ipset-27 used in this operation has an + // older version 0 than the current system version 1. Refresh UI or fetch the latest copy of the + // object and retry operation." 
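Because the API may reorder the comma-separated IP set value after submission, comparing the raw string is brittle. A minimal sketch (standalone helper with a hypothetical name, using only the standard library) treats the value as an unordered set before comparison.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// normalizeIPSetValue splits the comma-separated IP set value and sorts the
// entries so two semantically equal values compare equal even if the API has
// shuffled their order.
func normalizeIPSetValue(value string) []string {
	parts := strings.Split(value, ",")
	for i := range parts {
		parts[i] = strings.TrimSpace(parts[i])
	}
	sort.Strings(parts)
	return parts
}

func main() {
	sent := "192.168.200.1,192.168.200.1/24,192.168.200.1-192.168.200.24"
	returned := "192.168.200.1/24,192.168.200.1-192.168.200.24,192.168.200.1"

	a := normalizeIPSetValue(sent)
	b := normalizeIPSetValue(returned)
	fmt.Println(strings.Join(a, ",") == strings.Join(b, ",")) // true
}
```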
+ Revision *int `xml:"revision,omitempty"` +} + +// EdgeIpSets is a slice of pointers to EdgeIpSet +type EdgeIpSets []*EdgeIpSet + +// EdgeGatewayVnics is a data structure holding information of vNic configuration in NSX-V edge +// gateway using "/network/edges/edge_id/vnics" endpoint +type EdgeGatewayVnics struct { + XMLName xml.Name `xml:"vnics"` + Vnic []struct { + Label string `xml:"label"` + Name string `xml:"name"` + AddressGroups struct { + AddressGroup struct { + PrimaryAddress string `xml:"primaryAddress,omitempty"` + SecondaryAddresses struct { + IpAddress []string `xml:"ipAddress,omitempty"` + } `xml:"secondaryAddresses,omitempty"` + SubnetMask string `xml:"subnetMask,omitempty"` + SubnetPrefixLength string `xml:"subnetPrefixLength,omitempty"` + } `xml:"addressGroup,omitempty"` + } `xml:"addressGroups,omitempty"` + Mtu string `xml:"mtu,omitempty"` + Type string `xml:"type,omitempty"` + IsConnected string `xml:"isConnected,omitempty"` + Index *int `xml:"index"` + PortgroupId string `xml:"portgroupId,omitempty"` + PortgroupName string `xml:"portgroupName,omitempty"` + EnableProxyArp string `xml:"enableProxyArp,omitempty"` + EnableSendRedirects string `xml:"enableSendRedirects,omitempty"` + SubInterfaces struct { + SubInterface []struct { + IsConnected string `xml:"isConnected,omitempty"` + Label string `xml:"label,omitempty"` + Name string `xml:"name,omitempty"` + Index *int `xml:"index,omitempty"` + TunnelId string `xml:"tunnelId,omitempty"` + LogicalSwitchId string `xml:"logicalSwitchId,omitempty"` + LogicalSwitchName string `xml:"logicalSwitchName,omitempty"` + EnableSendRedirects string `xml:"enableSendRedirects,omitempty"` + Mtu string `xml:"mtu,omitempty"` + AddressGroups struct { + AddressGroup struct { + PrimaryAddress string `xml:"primaryAddress,omitempty"` + SubnetMask string `xml:"subnetMask,omitempty"` + SubnetPrefixLength string `xml:"subnetPrefixLength,omitempty"` + } `xml:"addressGroup,omitempty"` + } `xml:"addressGroups,omitempty"` + } `xml:"subInterface,omitempty"` + } `xml:"subInterfaces,omitempty"` + } `xml:"vnic,omitempty"` +} + +// EdgeGatewayInterfaces is a data structure holding information of vNic configuration in NSX-V edge +// gateway using "/network/edges/edge_id/vdcNetworks" endpoint +type EdgeGatewayInterfaces struct { + XMLName xml.Name `xml:"edgeInterfaces"` + EdgeInterface []struct { + Name string `xml:"name"` + Type string `xml:"type"` + Index *int `xml:"index"` + NetworkReference struct { + ID string `xml:"id"` + Name string `xml:"name"` + Type string `xml:"type"` + } `xml:"networkReference"` + AddressGroups struct { + AddressGroup struct { + PrimaryAddress string `xml:"primaryAddress"` + SubnetMask string `xml:"subnetMask"` + SubnetPrefixLength string `xml:"subnetPrefixLength"` + SecondaryAddresses struct { + IpAddress []string `xml:"ipAddress"` + } `xml:"secondaryAddresses"` + } `xml:"addressGroup"` + } `xml:"addressGroups"` + PortgroupId string `xml:"portgroupId"` + PortgroupName string `xml:"portgroupName"` + IsConnected string `xml:"isConnected"` + TunnelId string `xml:"tunnelId"` + } `xml:"edgeInterface"` +} + +// EdgeDhcpRelay - Dynamic Host Configuration Protocol (DHCP) relay enables you to leverage your +// existing DHCP infrastructure from within NSX without any interruption to the IP address +// management in your environment. DHCP messages are relayed from virtual machine(s) to the +// designated DHCP server(s) in the physical world. 
This enables IP addresses within NSX to continue +// to be in sync with IP addresses in other environments. +type EdgeDhcpRelay struct { + XMLName xml.Name `xml:"relay"` + // RelayServer specifies external relay server(s) to which DHCP messages are to be relayed to. + // The relay server can be an IP set, IP address block, domain, or a combination of all of + // these. Messages are relayed to each listed DHCP server. + RelayServer *EdgeDhcpRelayServer `xml:"relayServer"` + // EdgeDhcRelayAgents specifies a list of edge gateway interfaces (vNics) from which DHCP + // messages are to be relayed to the external DHCP relay server(s) with optional gateway + // interface addresses. + RelayAgents *EdgeDhcpRelayAgents `xml:"relayAgents"` +} + +type EdgeDhcpRelayServer struct { + // GroupingObjectIds is a general concept in NSX which allows to pass in many types of objects + // (like VM IDs, IP set IDs, org networks, security groups) howether in this case it accepts + // only IP sets which have IDs specified as 'f9daf2da-b4f9-4921-a2f4-d77a943a381c:ipset-2' where + // first part is vDC ID and the second part is unique IP set ID + GroupingObjectId []string `xml:"groupingObjectId,omitempty"` + // IpAddresses holds a list of IP addresses for DHCP servers + IpAddress []string `xml:"ipAddress,omitempty"` + // Fqdn holds a list of FQDNs (fully qualified domain names) + Fqdns []string `xml:"fqdn,omitempty"` +} + +// EdgeDhcpRelayAgent specifies which edge gateway interface (vNic) from which DHCP messages are to +// be relayed to the external DHCP relay server(s) with an optional gateway interface address. +type EdgeDhcpRelayAgent struct { + // VnicIndex must specify vNic adapter index on the edge gateway + VnicIndex *int `xml:"vnicIndex"` + // GatewayInterfaceAddress holds a gateway interface address. Optional, defaults to the vNic + // primary address. + GatewayInterfaceAddress string `xml:"giAddress,omitempty"` +} + +// EdgeDhcpRelayAgents holds a slice of EdgeDhcpRelayAgent +type EdgeDhcpRelayAgents struct { + Agents []EdgeDhcpRelayAgent `xml:"relayAgent"` +} + +// EdgeDhcpLease holds a list of EdgeDhcpLeaseInfo +type EdgeDhcpLease struct { + XMLName xml.Name `xml:"dhcpLeaseInfo"` + DhcpLeaseInfos []*EdgeDhcpLeaseInfo `xml:"leaseInfo"` +} + +// EdgeDhcpLeaseInfo contains information about DHCP leases provided by NSX-V edge gateway +type EdgeDhcpLeaseInfo struct { + // Uid statement records the client identifier used by the client to acquire the lease. Clients + // are not required to send client identifiers, and this statement only appears if the client + // did in fact send one. + Uid string `xml:"uid"` + // MacAddress holds hardware (MAC) address of requester (e.g. "00:50:56:01:29:c8") + MacAddress string `xml:"macAddress"` + // IpAddress holds the IP address assigned to a particular MAC address (e.g. "10.10.10.100") + IpAddress string `xml:"ipAddress"` + // ClientHostname Most DHCP clients will send their hostname in the host-name option. If a + // client sends its hostname in this way, the hostname is recorded on the lease with a + // client-hostname statement. This is not required by the protocol, however, so many specialized + // DHCP clients do not send a host-name option. + ClientHostname string `xml:"clientHostname"` + // BindingState declares the lease’s binding state. When the DHCP server is not configured to + // use the failover protocol, a lease’s binding state may be active, free or abandoned. 
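To show roughly how a lease listing in this shape would be consumed, the sketch below unmarshals a made-up `dhcpLeaseInfo` document into trimmed local mirrors of the lease types and prints MAC to IP mappings; the payload and type names are assumptions for illustration only.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Minimal local mirrors of EdgeDhcpLease / EdgeDhcpLeaseInfo, trimmed to the
// fields this example prints (illustration only).
type leaseInfo struct {
	MacAddress   string `xml:"macAddress"`
	IpAddress    string `xml:"ipAddress"`
	BindingState string `xml:"bindingState"`
}

type dhcpLeases struct {
	XMLName xml.Name    `xml:"dhcpLeaseInfo"`
	Leases  []leaseInfo `xml:"leaseInfo"`
}

func main() {
	// Made-up response body in the documented shape of the NSX-V lease endpoint.
	body := `<dhcpLeaseInfo>
  <leaseInfo><macAddress>00:50:56:01:29:c8</macAddress><ipAddress>10.10.10.100</ipAddress><bindingState>active</bindingState></leaseInfo>
  <leaseInfo><macAddress>00:50:56:01:29:c9</macAddress><ipAddress>10.10.10.101</ipAddress><bindingState>free</bindingState></leaseInfo>
</dhcpLeaseInfo>`

	var leases dhcpLeases
	if err := xml.Unmarshal([]byte(body), &leases); err != nil {
		panic(err)
	}
	for _, l := range leases.Leases {
		fmt.Printf("%s -> %s (%s)\n", l.MacAddress, l.IpAddress, l.BindingState)
	}
}
```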
The + // failover protocol adds some additional transitional states, as well as the backup state, + // which indicates that the lease is available for allocation by the failover secondary + BindingState string `xml:"bindingState"` + // NextBindingState statement indicates what state the lease will move to when the current state + // expires. The time when the current state expires is specified in the ends statement. + NextBindingState string `xml:"nextBindingState"` + // Cltt holds value of clients last transaction time (format is "weekday year/month/day + // hour:minute:second", e.g. "2 2019/12/17 06:12:03") + Cltt string `xml:"cltt"` + // Starts holds the start time of a lease (format is "weekday year/month/day + // hour:minute:second", e.g. "2 2019/12/17 06:12:03") + Starts string `xml:"starts"` + // Ends holds the end time of a lease (format is "weekday year/month/day hour:minute:second", + // e.g. "3 2019/12/18 06:12:03") + Ends string `xml:"ends"` + // HardwareType holds type of hardware, usually "ethernet" + HardwareType string `xml:"hardwareType"` +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/openapi.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/openapi.go new file mode 100644 index 000000000..1b0402075 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/openapi.go @@ -0,0 +1,367 @@ +package types + +import ( + "encoding/json" + "fmt" +) + +// OpenApiPages unwraps pagination for "Get All" endpoints in OpenAPI. Values kept in json.RawMessage helps to decouple +// marshalling paging related information from exact type related information. Paging can be handled dynamically this +// way while values can be marshaled into exact types. +type OpenApiPages struct { + // ResultTotal reports total results available + ResultTotal int `json:"resultTotal,omitempty"` + // PageCount reports total result pages available + PageCount int `json:"pageCount,omitempty"` + // Page reports current page of result + Page int `json:"page,omitempty"` + // PageSize reports page size + PageSize int `json:"pageSize,omitempty"` + // Associations ... + Associations interface{} `json:"associations,omitempty"` + // Values holds types depending on the endpoint therefore `json.RawMessage` is used to dynamically unmarshal into + // specific type as required + Values json.RawMessage `json:"values,omitempty"` +} + +// OpenApiError helps to marshal and provider meaningful `Error` for +type OpenApiError struct { + MinorErrorCode string `json:"minorErrorCode"` + Message string `json:"message"` + StackTrace string `json:"stackTrace"` +} + +// Error method implements Go's default `error` interface for CloudAPI errors formats them for human readable output. +func (openApiError OpenApiError) Error() string { + return fmt.Sprintf("%s - %s", openApiError.MinorErrorCode, openApiError.Message) +} + +// ErrorWithStack is the same as `Error()`, but also includes stack trace returned by API which is usually lengthy. +func (openApiError OpenApiError) ErrorWithStack() string { + return fmt.Sprintf("%s - %s. 
Stack: %s", openApiError.MinorErrorCode, openApiError.Message, + openApiError.StackTrace) +} + +// Role defines access roles in VCD +type Role struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Description string `json:"description"` + BundleKey string `json:"bundleKey"` + ReadOnly bool `json:"readOnly"` +} + +// NsxtTier0Router defines NSX-T Tier 0 router +type NsxtTier0Router struct { + ID string `json:"id,omitempty"` + Description string `json:"description"` + DisplayName string `json:"displayName"` +} + +// NsxtEdgeCluster is a struct to represent logical grouping of NSX-T Edge virtual machines. +type NsxtEdgeCluster struct { + // ID contains edge cluster ID (UUID format) + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + // NodeCount shows number of nodes in the edge cluster + NodeCount int `json:"nodeCount"` + // NodeType usually holds "EDGE_NODE" + NodeType string `json:"nodeType"` + // DeploymentType (e.g. "VIRTUAL_MACHINE") + DeploymentType string `json:"deploymentType"` +} + +// ExternalNetworkV2 defines a struct for OpenAPI endpoint which is capable of creating NSX-V or +// NSX-T external network based on provided NetworkBackings. +type ExternalNetworkV2 struct { + // ID is unique for the network. This field is read-only. + ID string `json:"id,omitempty"` + // Name of the network. + Name string `json:"name"` + // Description of the network + Description string `json:"description"` + // Subnets define one or more subnets and IP allocation pools in edge gateway + Subnets ExternalNetworkV2Subnets `json:"subnets"` + // NetworkBackings for this external network. Describes if this external network is backed by + // port groups, vCenter standard switch or an NSX-T Tier-0 router. 
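The `OpenApiPages` envelope defined earlier in this file keeps `Values` as `json.RawMessage` precisely so paging can be decoded first and the endpoint-specific type second. The standalone sketch below mirrors that two-step decode with trimmed local types and a made-up "Get all roles" page.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of OpenApiPages and Role, trimmed for the example.
type pages struct {
	ResultTotal int             `json:"resultTotal"`
	Page        int             `json:"page"`
	PageSize    int             `json:"pageSize"`
	Values      json.RawMessage `json:"values"`
}

type role struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

func main() {
	// Made-up page; the envelope is decoded first, the values second.
	body := `{"resultTotal":2,"page":1,"pageSize":25,
	          "values":[{"id":"urn:vcloud:role:1","name":"vApp Author"},
	                    {"id":"urn:vcloud:role:2","name":"Catalog Author"}]}`

	var p pages
	if err := json.Unmarshal([]byte(body), &p); err != nil {
		panic(err)
	}

	// Values stays raw until the caller knows the endpoint-specific type.
	var roles []role
	if err := json.Unmarshal(p.Values, &roles); err != nil {
		panic(err)
	}
	fmt.Println(p.ResultTotal, roles[0].Name, roles[1].Name)
}
```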
+ NetworkBackings ExternalNetworkV2Backings `json:"networkBackings"` +} + +// ExternalNetworkV2IPRange defines allocated IP pools for a subnet in external network +type ExternalNetworkV2IPRange struct { + // StartAddress holds starting IP address in the range + StartAddress string `json:"startAddress"` + // EndAddress holds ending IP address in the range + EndAddress string `json:"endAddress"` +} + +// ExternalNetworkV2IPRanges contains slice of ExternalNetworkV2IPRange +type ExternalNetworkV2IPRanges struct { + Values []ExternalNetworkV2IPRange `json:"values"` +} + +// ExternalNetworkV2Subnets contains slice of ExternalNetworkV2Subnet +type ExternalNetworkV2Subnets struct { + Values []ExternalNetworkV2Subnet `json:"values"` +} + +// ExternalNetworkV2Subnet defines one subnet for external network with assigned static IP ranges +type ExternalNetworkV2Subnet struct { + // Gateway for the subnet + Gateway string `json:"gateway"` + // PrefixLength holds prefix length of the subnet + PrefixLength int `json:"prefixLength"` + // DNSSuffix is the DNS suffix that VMs attached to this network will use (NSX-V only) + DNSSuffix string `json:"dnsSuffix"` + // DNSServer1 - first DNS server that VMs attached to this network will use (NSX-V only) + DNSServer1 string `json:"dnsServer1"` + // DNSServer2 - second DNS server that VMs attached to this network will use (NSX-V only) + DNSServer2 string `json:"dnsServer2"` + // Enabled indicates whether the external network subnet is currently enabled + Enabled bool `json:"enabled"` + // UsedIPCount shows number of IP addresses defined by the static IP ranges + UsedIPCount int `json:"usedIpCount,omitempty"` + // TotalIPCount shows number of IP address used from the static IP ranges + TotalIPCount int `json:"totalIpCount,omitempty"` + // IPRanges define allocated static IP pools allocated from a defined subnet + IPRanges ExternalNetworkV2IPRanges `json:"ipRanges"` +} + +type ExternalNetworkV2Backings struct { + Values []ExternalNetworkV2Backing `json:"values"` +} + +// ExternalNetworkV2Backing defines which networking subsystem is used for external network (NSX-T or NSX-V) +type ExternalNetworkV2Backing struct { + // BackingID must contain either Tier-0 router ID for NSX-T or PortGroup ID for NSX-V + BackingID string `json:"backingId"` + Name string `json:"name,omitempty"` + // BackingType can be either ExternalNetworkBackingTypeNsxtTier0Router in case of NSX-T or one + // of ExternalNetworkBackingTypeNetwork or ExternalNetworkBackingDvPortgroup in case of NSX-V + // Deprecated in favor of BackingTypeValue in API V35.0 + BackingType string `json:"backingType,omitempty"` + + // BackingTypeValue replaces BackingType in API V35.0 and adds support for additional network backing type + // ExternalNetworkBackingTypeNsxtSegment + BackingTypeValue string `json:"backingTypeValue,omitempty"` + // NetworkProvider defines backing network manager + NetworkProvider NetworkProvider `json:"networkProvider"` +} + +// NetworkProvider can be NSX-T manager or vCenter. ID is sufficient for creation purpose. 
+type NetworkProvider struct { + Name string `json:"name,omitempty"` + ID string `json:"id"` +} + +// VdcComputePolicy is represented as VM sizing policy in UI +type VdcComputePolicy struct { + ID string `json:"id,omitempty"` + Description string `json:"description,omitempty"` + Name string `json:"name"` + CPUSpeed *int `json:"cpuSpeed,omitempty"` + Memory *int `json:"memory,omitempty"` + CPUCount *int `json:"cpuCount,omitempty"` + CoresPerSocket *int `json:"coresPerSocket,omitempty"` + MemoryReservationGuarantee *float64 `json:"memoryReservationGuarantee,omitempty"` + CPUReservationGuarantee *float64 `json:"cpuReservationGuarantee,omitempty"` + CPULimit *int `json:"cpuLimit,omitempty"` + MemoryLimit *int `json:"memoryLimit,omitempty"` + CPUShares *int `json:"cpuShares,omitempty"` + MemoryShares *int `json:"memoryShares,omitempty"` + ExtraConfigs *struct { + AdditionalProp1 string `json:"additionalProp1,omitempty"` + AdditionalProp2 string `json:"additionalProp2,omitempty"` + AdditionalProp3 string `json:"additionalProp3,omitempty"` + } `json:"extraConfigs,omitempty"` + PvdcComputePolicyRef *struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + } `json:"pvdcComputePolicyRef,omitempty"` + PvdcComputePolicy *struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + } `json:"pvdcComputePolicy,omitempty"` + CompatibleVdcTypes []string `json:"compatibleVdcTypes,omitempty"` + IsSizingOnly bool `json:"isSizingOnly,omitempty"` + PvdcID string `json:"pvdcId,omitempty"` + NamedVMGroups [][]struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + } `json:"namedVmGroups,omitempty"` + LogicalVMGroupReferences []struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + } `json:"logicalVmGroupReferences,omitempty"` + IsAutoGenerated bool `json:"isAutoGenerated,omitempty"` +} + +// OpenApiReference is a generic reference type commonly used throughout OpenAPI endpoints +type OpenApiReference struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` +} + +type OpenApiReferences []OpenApiReference + +// VdcCapability can be used to determine VDC capabilities, including such: +// * Is it backed by NSX-T or NSX-V pVdc +// * Does it support BGP routing +type VdcCapability struct { + // Name of capability + Name string `json:"name"` + // Description of capability + Description string `json:"description"` + // Value can be any value. Sometimes it is a JSON bool (true, false), sometimes it is a JSON array (["custom", "default"]) + // and sometimes just a string ("NSX_V"). It is up for the consumer to handle values as per the Type field. + Value interface{} `json:"value"` + // Type of field (e.g. "Boolean", "String", "List") + Type string `json:"type"` + // Category of capability (e.g. "Security", "EdgeGateway", "OrgVdcNetwork") + Category string `json:"category"` +} + +// A Right is a component of a role, a global role, or a rights bundle. +// In this view, roles, global roles, and rights bundles are collections of rights. 
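Since `VdcCapability.Value` above is documented to arrive as a bool, a string, or a list depending on `Type`, a consumer has to branch on the decoded shape. A hedged sketch with a trimmed local mirror and made-up capability names shows one way to do that with a type switch.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of VdcCapability, trimmed for the example.
type capability struct {
	Name  string      `json:"name"`
	Type  string      `json:"type"`
	Value interface{} `json:"value"`
}

// describe renders the untyped Value according to what actually arrived.
func describe(c capability) string {
	switch v := c.Value.(type) {
	case bool:
		return fmt.Sprintf("%s: %t", c.Name, v)
	case string:
		return fmt.Sprintf("%s: %s", c.Name, v)
	case []interface{}:
		return fmt.Sprintf("%s: list of %d entries", c.Name, len(v))
	default:
		return fmt.Sprintf("%s: unhandled %T", c.Name, v)
	}
}

func main() {
	// Made-up capability payloads covering the three shapes mentioned above.
	body := `[{"name":"vdcGroupNetworkProviderTypes","type":"List","value":["NSX_T"]},
	          {"name":"networkProvider","type":"String","value":"NSX_V"},
	          {"name":"crossVdcNetworkCreation","type":"Boolean","value":false}]`

	var caps []capability
	if err := json.Unmarshal([]byte(body), &caps); err != nil {
		panic(err)
	}
	for _, c := range caps {
		fmt.Println(describe(c))
	}
}
```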
+// Note that the rights are not stored in the above collection structures, but retrieved separately +type Right struct { + Name string `json:"name"` + ID string `json:"id"` + Description string `json:"description,omitempty"` + BundleKey string `json:"bundleKey,omitempty"` // key used for internationalization + Category string `json:"category,omitempty"` // Category ID + ServiceNamespace string `json:"serviceNamespace,omitempty"` // Not used + RightType string `json:"rightType,omitempty"` // VIEW or MODIFY + ImpliedRights []OpenApiReference `json:"impliedRights,omitempty"` +} + +// RightsCategory defines the category to which the Right belongs +type RightsCategory struct { + Name string `json:"name"` + Id string `json:"id"` + BundleKey string `json:"bundleKey"` // key used for internationalization + Parent string `json:"parent"` + RightsCount struct { + View int `json:"view"` + Modify int `json:"modify"` + } `json:"rightsCount"` + SubCategories []string `json:"subCategories"` +} + +// RightsBundle is a collection of Rights to be assigned to a tenant(= organization). +// Changing a rights bundle and publishing it for a given tenant will limit +// the rights that the global roles implement in such tenant. +type RightsBundle struct { + Name string `json:"name"` + Id string `json:"id"` + Description string `json:"description,omitempty"` + BundleKey string `json:"bundleKey,omitempty"` // key used for internationalization + ReadOnly bool `json:"readOnly"` + PublishAll *bool `json:"publishAll"` +} + +// GlobalRole is a Role definition implemented in the provider that is passed on to tenants (=organizations) +// Modifying an existing global role has immediate effect on the corresponding roles in the tenants (no need +// to re-publish) while creating a new GlobalRole is only passed to the tenants if it is published. +type GlobalRole struct { + Name string `json:"name"` + Id string `json:"id"` + Description string `json:"description,omitempty"` + BundleKey string `json:"bundleKey,omitempty"` // key used for internationalization + ReadOnly bool `json:"readOnly"` + PublishAll *bool `json:"publishAll"` +} + +// OpenApiItems defines the input when multiple items need to be passed to a POST or PUT operation +// All the fields are optional, except Values +// This structure is the same as OpenApiPages, except for the type of Values, which is explicitly +// defined as a collection of name+ID structures +type OpenApiItems struct { + ResultTotal int `json:"resultTotal,omitempty"` + PageCount int `json:"pageCount,omitempty"` + Page int `json:"page,omitempty"` + PageSize int `json:"pageSize,omitempty"` + Associations interface{} `json:"associations,omitempty"` + Values []OpenApiReference `json:"values"` // a collection of items defined by an ID + a name +} + +// CertificateLibraryItem is a Certificate Library definition of stored Certificate details +type CertificateLibraryItem struct { + Alias string `json:"alias"` + Id string `json:"id,omitempty"` + Certificate string `json:"certificate"` // PEM encoded certificate + Description string `json:"description,omitempty"` + PrivateKey string `json:"privateKey,omitempty"` // PEM encoded private key. Required if providing a certificate chain + PrivateKeyPassphrase string `json:"privateKeyPassphrase,omitempty"` // passphrase for the private key. 
Required if the private key is encrypted +} + +// CurrentSessionInfo gives information about the current session +type CurrentSessionInfo struct { + ID string `json:"id"` // Session ID + User OpenApiReference `json:"user"` // Name of the user associated with this session + Org OpenApiReference `json:"org"` // Organization for this connection + Location string `json:"location"` // Location ID: unknown usage + Roles []string `json:"roles"` // Roles associated with the session user + RoleRefs OpenApiReferences `json:"roleRefs"` // Roles references for the session user + SessionIdleTimeoutMinutes int `json:"sessionIdleTimeoutMinutes"` // session idle timeout +} + +// VdcGroup is a VDC group definition +type VdcGroup struct { + Description string `json:"description,omitempty"` // The description of this group. + DfwEnabled bool `json:"dfwEnabled,omitempty"` // Whether Distributed Firewall is enabled for this vDC Group. Only applicable for NSX_T vDC Groups. + ErrorMessage string `json:"errorMessage,omitempty"` // If the group has an error status, a more detailed error message is set here. + Id string `json:"id,omitempty"` // The unique ID for the vDC Group (read-only). + LocalEgress bool `json:"localEgress,omitempty"` // Determines whether local egress is enabled for a universal router belonging to a universal vDC group. This value is used on create if universalNetworkingEnabled is set to true. This cannot be updated. This value is always false for local vDC groups. + Name string `json:"name"` // The name of this group. The name must be unique. + NetworkPoolId string `json:"networkPoolId,omitempty"` // ID of network pool to use if creating a local vDC group router. Must be set if creating a local group. Ignored if creating a universal group. + NetworkPoolUniversalId string `json:"networkPoolUniversalId,omitempty"` // The network provider’s universal id that is backing the universal network pool. This field is read-only and is derived from the list of participating vDCs if a universal vDC group is created. For universal vDC groups, each participating vDC should have a universal network pool that is backed by this same id. + NetworkProviderType string `json:"networkProviderType,omitempty"` // The values currently supported are NSX_V and NSX_T. Defines the networking provider backing the vDC Group. This is used on create. If not specified, NSX_V value will be used. NSX_V is used for existing vDC Groups and vDC Groups where Cross-VC NSX is used for the underlying technology. NSX_T is used when the networking provider type for the Organization vDCs in the group is NSX-T. NSX_T only supports groups of type LOCAL (single site). + OrgId string `json:"orgId"` // The organization that this group belongs to. + ParticipatingOrgVdcs []ParticipatingOrgVdcs `json:"participatingOrgVdcs"` // The list of organization vDCs that are participating in this group. + Status string `json:"status,omitempty"` // The status that the group can be in. Possible values are: SAVING, SAVED, CONFIGURING, REALIZED, REALIZATION_FAILED, DELETING, DELETE_FAILED, OBJECT_NOT_FOUND, UNCONFIGURED + Type string `json:"type,omitempty"` // Defines the group as LOCAL or UNIVERSAL. This cannot be changed. Local vDC Groups can have networks stretched across multiple vDCs in a single Cloud Director instance. Local vDC Groups share the same broadcast domain/transport zone and network provider scope. Universal vDC groups can have networks stretched across multiple vDCs in a single or multiple Cloud Director instance(s). 
Universal vDC groups are backed by a broadcast domain/transport zone that strectches across a single or multiple Cloud Director instance(s). Local vDC groups are supported for both NSX-V and NSX-T Network Provider Types. Universal vDC Groups are supported for only NSX_V Network Provider Type. Possible values are: LOCAL , UNIVERSAL + UniversalNetworkingEnabled bool `json:"universalNetworkingEnabled,omitempty"` // True means that a vDC group router has been created. If set to true for vdc group creation, a universal router will also be created. +} + +// ParticipatingOrgVdcs is a participating Org VDCs definition +type ParticipatingOrgVdcs struct { + FaultDomainTag string `json:"faultDomainTag,omitempty"` // Represents the fault domain of a given organization vDC. For NSX_V backed organization vDCs, this is the network provider scope. For NSX_T backed organization vDCs, this can vary (for example name of the provider vDC or compute provider scope). + NetworkProviderScope string `json:"networkProviderScope,omitempty"` // Read-only field that specifies the network provider scope of the vDC. + OrgRef OpenApiReference `json:"orgRef,omitempty"` // Read-only field that specifies what organization this vDC is in. + RemoteOrg bool `json:"remoteOrg,omitempty"` // Read-only field that specifies whether the vDC is local to this VCD site. + SiteRef OpenApiReference `json:"siteRef,omitempty"` // The site ID that this vDC belongs to. Required for universal vDC groups. + Status string `json:"status,omitempty"` // The status that the vDC can be in. An example is if the vDC has been deleted from the system but is still part of the group. Possible values are: SAVING, SAVED, CONFIGURING, REALIZED, REALIZATION_FAILED, DELETING, DELETE_FAILED, OBJECT_NOT_FOUND, UNCONFIGURED + VdcRef OpenApiReference `json:"vdcRef"` // The reference to the vDC that is part of this a vDC group. +} + +// CandidateVdc defines possible candidate VDCs for VDC group +type CandidateVdc struct { + FaultDomainTag string `json:"faultDomainTag"` + Id string `json:"id"` + Name string `json:"name"` + NetworkProviderScope string `json:"networkProviderScope"` + OrgRef OpenApiReference `json:"orgRef"` + SiteRef OpenApiReference `json:"siteRef"` +} + +// DfwPolicies defines Distributed firewall policies +type DfwPolicies struct { + Enabled bool `json:"enabled"` + DefaultPolicy *DefaultPolicy `json:"defaultPolicy,omitempty"` +} + +// DefaultPolicy defines Default policy for Distributed firewall +type DefaultPolicy struct { + Description string `json:"description,omitempty"` // Description for the security policy. + Enabled *bool `json:"enabled,omitempty"` // Whether this security policy is enabled. + Id string `json:"id,omitempty"` // The unique id of this security policy. On updates, the id is required for the policy, while for create a new id will be generated. This id is not a VCD URN. + Name string `json:"name"` // Name for the security policy. + Version *VersionField `json:"version,omitempty"` // This property describes the current version of the entity. To prevent clients from overwriting each other’s changes, update operations must include the version which can be obtained by issuing a GET operation. If the version number on an update call is missing, the operation will be rejected. This is only needed on update calls. 
+} + +// VersionField defines Version +type VersionField struct { + Version int `json:"version"` +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/saml.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/saml.go new file mode 100644 index 000000000..6fe5fc39b --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/saml.go @@ -0,0 +1,74 @@ +/* + * Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package types + +import ( + "encoding/xml" + "fmt" +) + +// VcdSamlMetadata helps to marshal vCD SAML Metadata endpoint response +// https://1.1.1.1/cloud/org/my-org/saml/metadata/alias/vcd +// +// Note. This structure is not complete and has many more fields. +type VcdSamlMetadata struct { + XMLName xml.Name `xml:"EntityDescriptor"` + Text string `xml:",chardata"` + ID string `xml:"ID,attr"` + // EntityID is the configured vCD Entity ID which is used in ADFS authentication request + EntityID string `xml:"entityID,attr"` +} + +// AdfsAuthErrorEnvelope helps to parse ADFS authentication error with help of Error() method +// +// Note. This structure is not complete and has many more fields. +type AdfsAuthErrorEnvelope struct { + XMLName xml.Name `xml:"Envelope"` + Body struct { + Text string `xml:",chardata"` + Fault struct { + Text string `xml:",chardata"` + Code struct { + Text string `xml:",chardata"` + Value string `xml:"Value"` + Subcode struct { + Text string `xml:",chardata"` + Value struct { + Text string `xml:",chardata"` + A string `xml:"a,attr"` + } `xml:"Value"` + } `xml:"Subcode"` + } `xml:"Code"` + Reason struct { + Chardata string `xml:",chardata"` + Text struct { + Text string `xml:",chardata"` + Lang string `xml:"lang,attr"` + } `xml:"Text"` + } `xml:"Reason"` + } `xml:"Fault"` + } `xml:"Body"` +} + +// Error satisfies Go's default `error` interface for AdfsAuthErrorEnvelope and formats +// error for humand readable output +func (samlErr AdfsAuthErrorEnvelope) Error() string { + return fmt.Sprintf("SAML request got error: %s", samlErr.Body.Fault.Reason.Text) +} + +// AdfsAuthResponseEnvelope helps to marshal ADFS reponse to authentication request. +// +// Note. This structure is not complete and has many more fields. +type AdfsAuthResponseEnvelope struct { + XMLName xml.Name `xml:"Envelope"` + Body struct { + RequestSecurityTokenResponseCollection struct { + RequestSecurityTokenResponse struct { + // RequestedSecurityTokenTxt returns data which is accepted by vCD as a SIGN token + RequestedSecurityTokenTxt InnerXML `xml:"RequestedSecurityToken"` + } `xml:"RequestSecurityTokenResponse"` + } `xml:"RequestSecurityTokenResponseCollection"` + } `xml:"Body"` +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/types.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/types.go new file mode 100644 index 000000000..c19b61205 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/types.go @@ -0,0 +1,3004 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. 
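Both `OpenApiError` and `AdfsAuthErrorEnvelope` above satisfy Go's `error` interface, so a caller can wrap them with `%w` and later recover the structured fields. The sketch below demonstrates that pattern with a hypothetical stand-in type; it is an illustration of the idiom, not of how the library itself returns errors.

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-in for OpenApiError: it carries API fields and satisfies
// the error interface the same way.
type apiError struct {
	MinorErrorCode string
	Message        string
}

func (e apiError) Error() string {
	return fmt.Sprintf("%s - %s", e.MinorErrorCode, e.Message)
}

func doRequest() error {
	// Pretend the API answered with a structured error body.
	return fmt.Errorf("GET /cloudapi/1.0.0/roles failed: %w",
		apiError{MinorErrorCode: "BAD_REQUEST", Message: "invalid filter"})
}

func main() {
	err := doRequest()

	// Because the type implements error, it can travel through %w wrapping and
	// be recovered later for structured handling.
	var apiErr apiError
	if errors.As(err, &apiErr) {
		fmt.Println("minor code:", apiErr.MinorErrorCode)
	}
	fmt.Println(err)
}
```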
+ */ + +// Package types/v56 provider all types which are used by govcd package in order to perform API +// requests and parse responses +package types + +import ( + "encoding/xml" + "fmt" + "sort" +) + +// Maps status Attribute Values for VAppTemplate, VApp, VM, and Media Objects +var VAppStatuses = map[int]string{ + -1: "FAILED_CREATION", + 0: "UNRESOLVED", + 1: "RESOLVED", + 2: "DEPLOYED", + 3: "SUSPENDED", + 4: "POWERED_ON", + 5: "WAITING_FOR_INPUT", + 6: "UNKNOWN", + 7: "UNRECOGNIZED", + 8: "POWERED_OFF", + 9: "INCONSISTENT_STATE", + 10: "MIXED", + 11: "DESCRIPTOR_PENDING", + 12: "COPYING_CONTENTS", + 13: "DISK_CONTENTS_PENDING", + 14: "QUARANTINED", + 15: "QUARANTINE_EXPIRED", + 16: "REJECTED", + 17: "TRANSFER_TIMEOUT", + 18: "VAPP_UNDEPLOYED", + 19: "VAPP_PARTIALLY_DEPLOYED", + 20: "PARTIALLY_POWERED_OFF", + 21: "PARTIALLY_SUSPENDED", +} + +// Maps status Attribute Values for VDC Objects +var VDCStatuses = map[int]string{ + -1: "FAILED_CREATION", + 0: "NOT_READY", + 1: "READY", + 2: "UNKNOWN", + 3: "UNRECOGNIZED", +} + +// VCD API + +// DefaultStorageProfileSection is the name of the storage profile that will be specified for this virtual machine. The named storage profile must exist in the organization vDC that contains the virtual machine. If not specified, the default storage profile for the vDC is used. +// Type: DefaultStorageProfileSection_Type +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Name of the storage profile that will be specified for this virtual machine. The named storage profile must exist in the organization vDC that contains the virtual machine. If not specified, the default storage profile for the vDC is used. +// Since: 5.1 +type DefaultStorageProfileSection struct { + StorageProfile string `xml:"StorageProfile,omitempty"` +} + +// CustomizationSection represents a vApp template customization settings. +// Type: CustomizationSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp template customization settings. +// Since: 1.0 +type CustomizationSection struct { + // FIXME: OVF Section needs to be laid down correctly + Info string `xml:"ovf:Info"` + // + GoldMaster bool `xml:"goldMaster,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + CustomizeOnInstantiate bool `xml:"CustomizeOnInstantiate"` + Link LinkList `xml:"Link,omitempty"` +} + +// LeaseSettingsSection represents vApp lease settings. +// Type: LeaseSettingsSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents vApp lease settings. 
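The status maps above translate the numeric `status` attribute returned by the API into readable names. A small hedged helper (using a trimmed local copy of the map, names as in the table above) shows the lookup with a fallback for codes the map does not cover.

```go
package main

import "fmt"

// Trimmed local copy of the VAppStatuses map (illustration only).
var vAppStatuses = map[int]string{
	4: "POWERED_ON",
	8: "POWERED_OFF",
}

// statusName resolves a numeric status attribute, falling back to a readable
// placeholder when the code is not in the map.
func statusName(code int) string {
	if name, ok := vAppStatuses[code]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN_STATUS_%d", code)
}

func main() {
	fmt.Println(statusName(4))  // POWERED_ON
	fmt.Println(statusName(99)) // UNKNOWN_STATUS_99
}
```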
+// Since: 0.9 +type LeaseSettingsSection struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + DeploymentLeaseExpiration string `xml:"DeploymentLeaseExpiration,omitempty"` + DeploymentLeaseInSeconds int `xml:"DeploymentLeaseInSeconds,omitempty"` + Link *Link `xml:"Link,omitempty"` + StorageLeaseExpiration string `xml:"StorageLeaseExpiration,omitempty"` + StorageLeaseInSeconds int `xml:"StorageLeaseInSeconds,omitempty"` +} + +// UpdateLeaseSettingsSection is an extended version of LeaseSettingsSection +// with additional fields for update +type UpdateLeaseSettingsSection struct { + XMLName xml.Name `xml:"LeaseSettingsSection"` + XmlnsOvf string `xml:"xmlns:ovf,attr,omitempty"` + Xmlns string `xml:"xmlns,attr,omitempty"` + OVFInfo string `xml:"ovf:Info"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + DeploymentLeaseExpiration string `xml:"DeploymentLeaseExpiration,omitempty"` + DeploymentLeaseInSeconds *int `xml:"DeploymentLeaseInSeconds,omitempty"` + Link *Link `xml:"Link,omitempty"` + StorageLeaseExpiration string `xml:"StorageLeaseExpiration,omitempty"` + StorageLeaseInSeconds *int `xml:"StorageLeaseInSeconds,omitempty"` +} + +// IPRange represents a range of IP addresses, start and end inclusive. +// Type: IpRangeType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a range of IP addresses, start and end inclusive. +// Since: 0.9 +type IPRange struct { + StartAddress string `xml:"StartAddress"` // Start address of the IP range. + EndAddress string `xml:"EndAddress"` // End address of the IP range. +} + +// DhcpService represents a DHCP network service. +// Type: DhcpServiceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a DHCP network service. +// Since: +type DhcpService struct { + IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag + DefaultLeaseTime int `xml:"DefaultLeaseTime,omitempty"` // Default lease in seconds for DHCP addresses. + MaxLeaseTime int `xml:"MaxLeaseTime"` // Max lease in seconds for DHCP addresses. + IPRange *IPRange `xml:"IpRange"` // IP range for DHCP addresses. + RouterIP string `xml:"RouterIp,omitempty"` // Router IP. + SubMask string `xml:"SubMask,omitempty"` // The subnet mask. + PrimaryNameServer string `xml:"PrimaryNameServer,omitempty"` // The primary name server. + SecondaryNameServer string `xml:"SecondaryNameServer,omitempty"` // The secondary name server. + DomainName string `xml:"DomainName,omitempty"` // The domain name. +} + +// NetworkFeatures represents features of a network. +// Type: NetworkFeaturesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents features of a network. +// Since: +type NetworkFeatures struct { + DhcpService *DhcpService `xml:"DhcpService,omitempty"` // Substitute for NetworkService. DHCP service settings + FirewallService *FirewallService `xml:"FirewallService,omitempty"` // Substitute for NetworkService. Firewall service settings + NatService *NatService `xml:"NatService,omitempty"` // Substitute for NetworkService. NAT service settings + StaticRoutingService *StaticRoutingService `xml:"StaticRoutingService,omitempty"` // Substitute for NetworkService. Static Routing service settings + // TODO: Not Implemented + // IpsecVpnService IpsecVpnService `xml:"IpsecVpnService,omitempty"` // Substitute for NetworkService. 
Ipsec Vpn service settings +} + +// IPAddresses a list of IP addresses +// Type: IpAddressesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A list of IP addresses. +// Since: 0.9 +type IPAddresses struct { + IPAddress []string `xml:"IpAddress,omitempty"` // A list of IP addresses. +} + +// IPRanges represents a list of IP ranges. +// Type: IpRangesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of IP ranges. +// Since: 0.9 +type IPRanges struct { + IPRange []*IPRange `xml:"IpRange,omitempty"` // IP range. +} + +// IPScope specifies network settings like gateway, network mask, DNS servers IP ranges etc +// Type: IpScopeType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Specify network settings like gateway, network mask, DNS servers, IP ranges, etc. +// Since: 0.9 +type IPScope struct { + IsInherited bool `xml:"IsInherited"` // True if the IP scope is inherit from parent network. + Gateway string `xml:"Gateway,omitempty"` // Gateway of the network. + Netmask string `xml:"Netmask,omitempty"` // Network mask. + DNS1 string `xml:"Dns1,omitempty"` // Primary DNS server. + DNS2 string `xml:"Dns2,omitempty"` // Secondary DNS server. + DNSSuffix string `xml:"DnsSuffix,omitempty"` // DNS suffix. + IsEnabled bool `xml:"IsEnabled,omitempty"` // Indicates if subnet is enabled or not. Default value is True. + IPRanges *IPRanges `xml:"IpRanges,omitempty"` // IP ranges used for static pool allocation in the network. + AllocatedIPAddresses *IPAddresses `xml:"AllocatedIpAddresses,omitempty"` // Read-only list of allocated IP addresses in the network. + SubAllocations *SubAllocations `xml:"SubAllocations,omitempty"` // Read-only list of IP addresses that are sub allocated to edge gateways. +} + +// SubAllocations a list of IP addresses that are sub allocated to edge gateways. +// Type: SubAllocationsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A list of IP addresses that are sub allocated to edge gateways. +// Since: 5.1 +type SubAllocations struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + // Elements + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + SubAllocation *SubAllocation `xml:"SubAllocation,omitempty"` // IP Range sub allocated to a edge gateway. +} + +// SubAllocation IP range sub allocated to an edge gateway. +// Type: SubAllocationType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: IP range sub allocated to an edge gateway. +// Since: 5.1 +type SubAllocation struct { + EdgeGateway *Reference `xml:"EdgeGateway,omitempty"` // Edge gateway that uses this sub allocation. + IPRanges *IPRanges `xml:"IpRanges,omitempty"` // IP range sub allocated to the edge gateway. +} + +// IPScopes represents a list of IP scopes. +// Type: IpScopesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of IP scopes. +// Since: 5.1 +type IPScopes struct { + IPScope []*IPScope `xml:"IpScope"` // IP scope. +} + +// NetworkConfiguration is the configuration applied to a network. This is an abstract base type. +// The concrete types include those for vApp and Organization wide networks. +// Type: NetworkConfigurationType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: The configurations applied to a network. This is an abstract base type. 
The concrete types include those for vApp and Organization wide networks. +// Since: 0.9 +type NetworkConfiguration struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + BackwardCompatibilityMode bool `xml:"BackwardCompatibilityMode"` + IPScopes *IPScopes `xml:"IpScopes,omitempty"` + ParentNetwork *Reference `xml:"ParentNetwork,omitempty"` + FenceMode string `xml:"FenceMode"` + RetainNetInfoAcrossDeployments *bool `xml:"RetainNetInfoAcrossDeployments,omitempty"` + Features *NetworkFeatures `xml:"Features,omitempty"` + + // SubInterface and DistributedInterface are mutually exclusive + // When they are both nil, it means the "internal" interface (the default) will be used. + // When one of them is set, the corresponding interface will be used. + // They cannot be both set (we'll get an API error if we do). + SubInterface *bool `xml:"SubInterface,omitempty"` + DistributedInterface *bool `xml:"DistributedInterface,omitempty"` + GuestVlanAllowed *bool `xml:"GuestVlanAllowed,omitempty"` + // TODO: Not Implemented + // RouterInfo RouterInfo `xml:"RouterInfo,omitempty"` + // SyslogServerSettings SyslogServerSettings `xml:"SyslogServerSettings,omitempty"` +} + +// VAppNetworkConfiguration represents a vApp network configuration +// Used in vApp network configuration actions as part of vApp type, +// VApp.NetworkConfigSection.NetworkConfig or directly as NetworkConfigSection.NetworkConfig for various API calls. +// Type: VAppNetworkConfigurationType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp network configuration. +// Since: 0.9 +type VAppNetworkConfiguration struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + NetworkName string `xml:"networkName,attr"` + + Link *Link `xml:"Link,omitempty"` + Description string `xml:"Description,omitempty"` + Configuration *NetworkConfiguration `xml:"Configuration"` + IsDeployed bool `xml:"IsDeployed"` +} + +// VAppNetwork represents a vApp network configuration +// Used as input PUT /network/{id} +// Type: VAppNetworkType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp network configuration. +// Since: 0.9 +type VAppNetwork struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + Name string `xml:"name,attr"` + Deployed *bool `xml:"deployed,attr"` // True if the network is deployed. + + Link *Link `xml:"Link,omitempty"` + Description string `xml:"Description,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + Configuration *NetworkConfiguration `xml:"Configuration"` +} + +// NetworkConfigSection is container for vApp networks. +// Type: NetworkConfigSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for vApp networks. 
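As a concrete illustration of the IP scope types above, the sketch below assembles one static allocation pool and serializes it. It uses trimmed local mirrors of `IPRange`, `IPRanges`, and `IPScope` rather than the vendored types, and the addresses are made up.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Local mirrors trimmed to the fields needed for one static pool (illustration only).
type ipRange struct {
	StartAddress string `xml:"StartAddress"`
	EndAddress   string `xml:"EndAddress"`
}

type ipRanges struct {
	IPRange []*ipRange `xml:"IpRange,omitempty"`
}

type ipScope struct {
	XMLName  xml.Name  `xml:"IpScope"`
	Gateway  string    `xml:"Gateway,omitempty"`
	Netmask  string    `xml:"Netmask,omitempty"`
	IPRanges *ipRanges `xml:"IpRanges,omitempty"`
}

func main() {
	scope := ipScope{
		Gateway: "192.168.2.1",
		Netmask: "255.255.255.0",
		IPRanges: &ipRanges{
			IPRange: []*ipRange{{StartAddress: "192.168.2.100", EndAddress: "192.168.2.199"}},
		},
	}

	out, err := xml.MarshalIndent(scope, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```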
+// Since: 0.9 +type NetworkConfigSection struct { + // Extends OVF Section_Type + // FIXME: Fix the OVF section + XMLName xml.Name `xml:"NetworkConfigSection"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Ovf string `xml:"xmlns:ovf,attr,omitempty"` + + Info string `xml:"ovf:Info"` + // + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link *Link `xml:"Link,omitempty"` + NetworkConfig []VAppNetworkConfiguration `xml:"NetworkConfig,omitempty"` +} + +// NetworkNames allows to extract network names +func (n NetworkConfigSection) NetworkNames() []string { + var list []string + for _, netConfig := range n.NetworkConfig { + list = append(list, netConfig.NetworkName) + } + return list +} + +// NetworkConnection represents a network connection in the virtual machine. +// Type: NetworkConnectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a network connection in the virtual machine. +// Since: 0.9 +type NetworkConnection struct { + Network string `xml:"network,attr"` // Name of the network to which this NIC is connected. + NeedsCustomization bool `xml:"needsCustomization,attr,omitempty"` // True if this NIC needs customization. + NetworkConnectionIndex int `xml:"NetworkConnectionIndex"` // Virtual slot number associated with this NIC. First slot number is 0. + IPAddress string `xml:"IpAddress,omitempty"` // IP address assigned to this NIC. + ExternalIPAddress string `xml:"ExternalIpAddress,omitempty"` // If the network to which this NIC connects provides NAT services, the external address assigned to this NIC appears here. + IsConnected bool `xml:"IsConnected"` // If the virtual machine is undeployed, this value specifies whether the NIC should be connected upon deployment. If the virtual machine is deployed, this value reports the current status of this NIC's connection, and can be updated to change that connection status. + MACAddress string `xml:"MACAddress,omitempty"` // MAC address associated with the NIC. + IPAddressAllocationMode string `xml:"IpAddressAllocationMode"` // IP address allocation mode for this connection. One of: POOL (A static IP address is allocated automatically from a pool of addresses.) DHCP (The IP address is obtained from a DHCP service.) MANUAL (The IP address is assigned manually in the IpAddress element.) NONE (No IP addressing mode specified.) + NetworkAdapterType string `xml:"NetworkAdapterType,omitempty"` +} + +// NetworkConnectionSection the container for the network connections of this virtual machine. +// Type: NetworkConnectionSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for the network connections of this virtual machine. +// Since: 0.9 +type NetworkConnectionSection struct { + // Extends OVF Section_Type + // FIXME: Fix the OVF section + XMLName xml.Name `xml:"NetworkConnectionSection"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Ovf string `xml:"xmlns:ovf,attr,omitempty"` + + Info string `xml:"ovf:Info"` + // + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + PrimaryNetworkConnectionIndex int `xml:"PrimaryNetworkConnectionIndex"` + NetworkConnection []*NetworkConnection `xml:"NetworkConnection,omitempty"` + Link *Link `xml:"Link,omitempty"` +} + +// InstantiationParams is a container for ovf:Section_Type elements that specify vApp configuration on instantiate, compose, or recompose. 
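Building on the NIC types above, the following sketch composes a `NetworkConnectionSection` for a single NIC that draws its address from the network's static pool ("POOL" allocation mode, as listed in the field comments). The structs are trimmed local mirrors and the network name is hypothetical.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Local mirrors of NetworkConnection / NetworkConnectionSection, trimmed to the
// fields needed to attach one NIC to an org network (illustration only).
type networkConnection struct {
	Network                 string `xml:"network,attr"`
	NetworkConnectionIndex  int    `xml:"NetworkConnectionIndex"`
	IsConnected             bool   `xml:"IsConnected"`
	IPAddressAllocationMode string `xml:"IpAddressAllocationMode"`
}

type networkConnectionSection struct {
	XMLName                       xml.Name             `xml:"NetworkConnectionSection"`
	PrimaryNetworkConnectionIndex int                  `xml:"PrimaryNetworkConnectionIndex"`
	NetworkConnection             []*networkConnection `xml:"NetworkConnection,omitempty"`
}

func main() {
	// One NIC, taking its address from the network's static pool.
	section := networkConnectionSection{
		PrimaryNetworkConnectionIndex: 0,
		NetworkConnection: []*networkConnection{{
			Network:                 "my-org-network", // hypothetical network name
			NetworkConnectionIndex:  0,
			IsConnected:             true,
			IPAddressAllocationMode: "POOL",
		}},
	}

	out, err := xml.MarshalIndent(section, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```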
+// Type: InstantiationParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for ovf:Section_Type elements that specify vApp configuration on instantiate, compose, or recompose. +// Since: 0.9 +type InstantiationParams struct { + CustomizationSection *CustomizationSection `xml:"CustomizationSection,omitempty"` + DefaultStorageProfileSection *DefaultStorageProfileSection `xml:"DefaultStorageProfileSection,omitempty"` + GuestCustomizationSection *GuestCustomizationSection `xml:"GuestCustomizationSection,omitempty"` + LeaseSettingsSection *LeaseSettingsSection `xml:"LeaseSettingsSection,omitempty"` + NetworkConfigSection *NetworkConfigSection `xml:"NetworkConfigSection,omitempty"` + NetworkConnectionSection *NetworkConnectionSection `xml:"NetworkConnectionSection,omitempty"` + ProductSection *ProductSection `xml:"ProductSection,omitempty"` + // TODO: Not Implemented + // SnapshotSection SnapshotSection `xml:"SnapshotSection,omitempty"` +} + +// OrgVDCNetwork represents an Org VDC network in the vCloud model. +// Type: OrgVdcNetworkType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents an Org VDC network in the vCloud model. +// Since: 5.1 +type OrgVDCNetwork struct { + XMLName xml.Name `xml:"OrgVdcNetwork"` + Xmlns string `xml:"xmlns,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Status string `xml:"status,attr,omitempty"` + Link []Link `xml:"Link,omitempty"` + Description string `xml:"Description,omitempty"` + Configuration *NetworkConfiguration `xml:"Configuration,omitempty"` + EdgeGateway *Reference `xml:"EdgeGateway,omitempty"` + ServiceConfig *GatewayFeatures `xml:"ServiceConfig,omitempty"` // Specifies the service configuration for an isolated Org VDC networks + IsShared bool `xml:"IsShared"` + VimPortGroupRef []*VimObjectRef `xml:"VimPortGroupRef,omitempty"` // Needed to set up DHCP inside ServiceConfig + Tasks *TasksInProgress `xml:"Tasks,omitempty"` +} + +// SupportedHardwareVersions contains a list of VMware virtual hardware versions supported in this vDC. +// Type: SupportedHardwareVersionsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Contains a list of VMware virtual hardware versions supported in this vDC. +// Since: 1.5 +type SupportedHardwareVersions struct { + SupportedHardwareVersion []string `xml:"SupportedHardwareVersion,omitempty"` // A virtual hardware version supported in this vDC. +} + +// Capabilities collection of supported hardware capabilities. +// Type: CapabilitiesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Collection of supported hardware capabilities. +// Since: 1.5 +type Capabilities struct { + SupportedHardwareVersions *SupportedHardwareVersions `xml:"SupportedHardwareVersions,omitempty"` // Read-only list of virtual hardware versions supported by this vDC. +} + +// Vdc represents the user view of an organization VDC. +// Type: VdcType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the user view of an organization VDC. 
+// Since: 0.9 +type Vdc struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Status int `xml:"status,attr,omitempty"` + + Link LinkList `xml:"Link,omitempty"` + Description string `xml:"Description,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + AllocationModel string `xml:"AllocationModel"` + ComputeCapacity []*ComputeCapacity `xml:"ComputeCapacity"` + ResourceEntities []*ResourceEntities `xml:"ResourceEntities,omitempty"` + AvailableNetworks []*AvailableNetworks `xml:"AvailableNetworks,omitempty"` + Capabilities []*Capabilities `xml:"Capabilities,omitempty"` + NicQuota int `xml:"NicQuota"` + NetworkQuota int `xml:"NetworkQuota"` + UsedNetworkCount int `xml:"UsedNetworkCount,omitempty"` + VMQuota int `xml:"VmQuota"` + IsEnabled bool `xml:"IsEnabled"` + VdcStorageProfiles *VdcStorageProfiles `xml:"VdcStorageProfiles"` + DefaultComputePolicy *Reference `xml:"DefaultComputePolicy"` +} + +// AdminVdc represents the admin view of an organization VDC. +// Type: AdminVdcType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the admin view of an organization VDC. +// Since: 0.9 +type AdminVdc struct { + Xmlns string `xml:"xmlns,attr"` + Vdc + + VCpuInMhz2 *int64 `xml:"VCpuInMhz2,omitempty"` + ResourceGuaranteedMemory *float64 `xml:"ResourceGuaranteedMemory,omitempty"` + ResourceGuaranteedCpu *float64 `xml:"ResourceGuaranteedCpu,omitempty"` + VCpuInMhz *int64 `xml:"VCpuInMhz,omitempty"` + IsThinProvision *bool `xml:"IsThinProvision,omitempty"` + NetworkPoolReference *Reference `xml:"NetworkPoolReference,omitempty"` + ProviderVdcReference *Reference `xml:"ProviderVdcReference"` + ResourcePoolRefs *VimObjectRefs `xml:"vmext:ResourcePoolRefs,omitempty"` + UsesFastProvisioning *bool `xml:"UsesFastProvisioning,omitempty"` + OverCommitAllowed bool `xml:"OverCommitAllowed,omitempty"` + VmDiscoveryEnabled bool `xml:"VmDiscoveryEnabled,omitempty"` + IsElastic *bool `xml:"IsElastic,omitempty"` // Supported from 32.0 for the Flex model + IncludeMemoryOverhead *bool `xml:"IncludeMemoryOverhead,omitempty"` // Supported from 32.0 for the Flex model + UniversalNetworkPoolReference *Reference `xml:"UniversalNetworkPoolReference,omitempty"` // Reference to a universal network pool +} + +// VdcStorageProfileConfiguration represents the parameters to assign a storage profile in creation of organization vDC. +// Type: VdcStorageProfileParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the parameters to create a storage profile in an organization vDC. 
+// Since: 5.1 +// https://code.vmware.com/apis/220/vcloud#/doc/doc/types/VdcStorageProfileParamsType.html +type VdcStorageProfileConfiguration struct { + Enabled bool `xml:"Enabled,omitempty"` + Units string `xml:"Units"` + Limit int64 `xml:"Limit"` + Default bool `xml:"Default"` + ProviderVdcStorageProfile *Reference `xml:"ProviderVdcStorageProfile"` +} + +// VdcStorageProfile represents the parameters for fetched storage profile +// Type: VdcStorageProfileParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/types/VdcStorageProfileType.html +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/71e12563-bc11-4d64-821d-92d30f8fcfa1/7424bf8e-aec2-44ad-be7d-b98feda7bae0/doc/doc/types/AdminVdcStorageProfileType.html +type VdcStorageProfile struct { + Xmlns string `xml:"xmlns,attr"` + Name string `xml:"name,attr"` + Enabled bool `xml:"Enabled,omitempty"` + Units string `xml:"Units"` + Limit int64 `xml:"Limit"` + Default bool `xml:"Default"` + IopsSettings *VdcStorageProfileIopsSettings `xml:"IopsSettingsint64"` + StorageUsedMB int64 `xml:"StorageUsedMB"` + IopsAllocated int64 `xml:"IopsAllocated"` + ProviderVdcStorageProfile *Reference `xml:"ProviderVdcStorageProfile"` +} + +// AdminVdcStorageProfile represents the parameters for fetched storage profile +// Type: AdminVdcStorageProfileType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/71e12563-bc11-4d64-821d-92d30f8fcfa1/7424bf8e-aec2-44ad-be7d-b98feda7bae0/doc/doc/types/AdminVdcStorageProfileType.html +type AdminVdcStorageProfile struct { + Xmlns string `xml:"xmlns,attr"` + Name string `xml:"name,attr"` + Enabled *bool `xml:"Enabled,omitempty"` + Units string `xml:"Units"` + Limit int64 `xml:"Limit"` + Default bool `xml:"Default"` + IopsSettings *VdcStorageProfileIopsSettings `xml:"IopsSettingsint64"` + StorageUsedMB int64 `xml:"StorageUsedMB"` + IopsAllocated int64 `xml:"IopsAllocated"` + ProviderVdcStorageProfile *Reference `xml:"ProviderVdcStorageProfile"` +} + +// VdcStorageProfileIopsSettings represents the parameters for VDC storage profiles Iops settings. +// Type: VdcStorageProfileIopsSettings +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/71e12563-bc11-4d64-821d-92d30f8fcfa1/7424bf8e-aec2-44ad-be7d-b98feda7bae0/doc/doc/types/VdcStorageProfileIopsSettingsType.html +type VdcStorageProfileIopsSettings struct { + Xmlns string `xml:"xmlns,attr"` + Enabled bool `xml:"enabled"` + DiskIopsMax int64 `xml:"diskIopsMax,"` + DiskIopsDefault int64 `xml:"diskIopsDefault"` + StorageProfileIopsLimit int64 `xml:"storageProfileIopsLimit,omitempty"` + DiskIopsPerGbMax int64 `xml:"diskIopsPerGbMax"` +} + +// VdcConfiguration models the payload for creating a VDC. 
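+//
+// A hedged construction sketch; the values below are illustrative only and
+// providerVdcHref is assumed to be obtained elsewhere:
+//
+//   cfg := &VdcConfiguration{
+//       Xmlns:           "http://www.vmware.com/vcloud/v1.5",
+//       Name:            "my-org-vdc",
+//       AllocationModel: "Flex",
+//       ComputeCapacity: []*ComputeCapacity{{
+//           CPU:    &CapacityWithUsage{Units: "MHz", Allocated: 1000, Limit: 1000},
+//           Memory: &CapacityWithUsage{Units: "MB", Allocated: 1024, Limit: 1024},
+//       }},
+//       ProviderVdcReference: &Reference{HREF: providerVdcHref},
+//   }
+//   payload, err := xml.Marshal(cfg)
+//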
+// Type: CreateVdcParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Parameters for creating an organization VDC +// Since: 5.1 +// https://code.vmware.com/apis/220/vcloud#/doc/doc/types/CreateVdcParamsType.html +type VdcConfiguration struct { + XMLName xml.Name `xml:"CreateVdcParams"` + Xmlns string `xml:"xmlns,attr"` + Name string `xml:"name,attr"` + Description string `xml:"Description,omitempty"` + AllocationModel string `xml:"AllocationModel"` // Flex supported from 32.0 + ComputeCapacity []*ComputeCapacity `xml:"ComputeCapacity"` + NicQuota int `xml:"NicQuota,omitempty"` + NetworkQuota int `xml:"NetworkQuota,omitempty"` + VmQuota int `xml:"VmQuota,omitempty"` + IsEnabled bool `xml:"IsEnabled,omitempty"` + // Create uses VdcStorageProfileConfiguration and fetch AdminVdcStorageProfile or VdcStorageProfile + VdcStorageProfile []*VdcStorageProfileConfiguration `xml:"VdcStorageProfile"` + ResourceGuaranteedMemory *float64 `xml:"ResourceGuaranteedMemory,omitempty"` + ResourceGuaranteedCpu *float64 `xml:"ResourceGuaranteedCpu,omitempty"` + VCpuInMhz int64 `xml:"VCpuInMhz,omitempty"` + IsThinProvision bool `xml:"IsThinProvision,omitempty"` + NetworkPoolReference *Reference `xml:"NetworkPoolReference,omitempty"` + ProviderVdcReference *Reference `xml:"ProviderVdcReference"` + UsesFastProvisioning bool `xml:"UsesFastProvisioning,omitempty"` + OverCommitAllowed bool `xml:"OverCommitAllowed,omitempty"` + VmDiscoveryEnabled bool `xml:"VmDiscoveryEnabled,omitempty"` + IsElastic *bool `xml:"IsElastic,omitempty"` // Supported from 32.0 for the Flex model + IncludeMemoryOverhead *bool `xml:"IncludeMemoryOverhead,omitempty"` // Supported from 32.0 for the Flex model +} + +// Task represents an asynchronous operation in vCloud Director. +// Type: TaskType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents an asynchronous operation in vCloud Director. +// Since: 0.9 +// Comments added from https://code.vmware.com/apis/912/vmware-cloud-director/doc/doc/types/TaskType.html +type Task struct { + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + ID string `xml:"id,attr,omitempty"` // The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused. + OperationKey string `xml:"operationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations. + Name string `xml:"name,attr"` // The name of the entity. + Status string `xml:"status,attr"` // The execution status of the task. One of queued, preRunning, running, success, error, aborted + Operation string `xml:"operation,attr,omitempty"` // A message describing the operation that is tracked by this task. + OperationName string `xml:"operationName,attr,omitempty"` // The short name of the operation that is tracked by this task. + ServiceNamespace string `xml:"serviceNamespace,attr,omitempty"` // Identifier of the service that created the task. It must not start with com.vmware.vcloud and the length must be between 1 and 128 symbols. + StartTime string `xml:"startTime,attr,omitempty"` // The date and time the system started executing the task. May not be present if the task has not been executed yet. + EndTime string `xml:"endTime,attr,omitempty"` // The date and time that processing of the task was completed. May not be present if the task is still being executed. 
+ ExpiryTime string `xml:"expiryTime,attr,omitempty"` // The date and time at which the task resource will be destroyed and no longer available for retrieval. May not be present if the task has not been executed or is still being executed. + CancelRequested bool `xml:"cancelRequested,attr,omitempty"` // Whether user has requested this processing to be canceled. + Description string `xml:"Description,omitempty"` // Optional description. + Details string `xml:"Details,omitempty"` // Detailed message about the task. Also contained by the Owner entity when task status is preRunning. + Error *Error `xml:"Error,omitempty"` // Represents error information from a failed task. + Link *Link `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + Organization *Reference `xml:"Organization,omitempty"` // The organization to which the User belongs. + Owner *Reference `xml:"Owner,omitempty"` // Reference to the owner of the task. This is typically the object that the task is creating or updating. + Progress int `xml:"Progress,omitempty"` // Read-only indicator of task progress as an approximate percentage between 0 and 100. Not available for all tasks. + Tasks *TasksInProgress `xml:"Tasks,omitempty"` // A list of queued, running, or recently completed tasks associated with this entity. + User *Reference `xml:"User,omitempty"` // The user who started the task. + // + // TODO: add the following fields + // Params anyType The parameters with which this task was started. + // Result ResultType An optional element that can be used to hold the result of a task. + // VcTaskList VcTaskListType List of Virtual Center tasks related to this vCD task. +} + +// CapacityWithUsage represents a capacity and usage of a given resource. +// Type: CapacityWithUsageType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a capacity and usage of a given resource. +// Since: 0.9 +type CapacityWithUsage struct { + Units string `xml:"Units"` + Allocated int64 `xml:"Allocated"` + Limit int64 `xml:"Limit"` + Reserved int64 `xml:"Reserved,omitempty"` + Used int64 `xml:"Used,omitempty"` +} + +// ComputeCapacity represents VDC compute capacity. +// Type: ComputeCapacityType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents VDC compute capacity. +// Since: 0.9 +type ComputeCapacity struct { + CPU *CapacityWithUsage `xml:"Cpu"` + Memory *CapacityWithUsage `xml:"Memory"` +} + +// Reference is a reference to a resource. Contains an href attribute and optional name and type attributes. +// Type: ReferenceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A reference to a resource. Contains an href attribute and optional name and type attributes. +// Since: 0.9 +type Reference struct { + HREF string `xml:"href,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` +} + +// ResourceReference represents a reference to a resource. Contains an href attribute, a resource status attribute, and optional name and type attributes. +// Type: ResourceReferenceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a reference to a resource. Contains an href attribute, a resource status attribute, and optional name and type attributes. 
+// Since: 0.9 +type ResourceReference struct { + HREF string `xml:"href,attr"` + ID string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + Status string `xml:"status,attr,omitempty"` +} + +// VdcStorageProfiles is a container for references to storage profiles associated with a vDC. +// Element: VdcStorageProfiles +// Type: VdcStorageProfilesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for references to storage profiles associated with a vDC. +// Since: 5.1 +type VdcStorageProfiles struct { + VdcStorageProfile []*Reference `xml:"VdcStorageProfile,omitempty"` +} + +// ResourceEntities is a container for references to ResourceEntity objects in this vDC. +// Type: ResourceEntitiesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for references to ResourceEntity objects in this vDC. +// Since: 0.9 +type ResourceEntities struct { + ResourceEntity []*ResourceReference `xml:"ResourceEntity,omitempty"` +} + +// AvailableNetworks is a container for references to available organization vDC networks. +// Type: AvailableNetworksType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for references to available organization vDC networks. +// Since: 0.9 +type AvailableNetworks struct { + Network []*Reference `xml:"Network,omitempty"` +} + +// Link extends reference type by adding relation attribute. Defines a hyper-link with a relationship, hyper-link reference, and an optional MIME type. +// Type: LinkType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Extends reference type by adding relation attribute. Defines a hyper-link with a relationship, hyper-link reference, and an optional MIME type. +// Since: 0.9 +type Link struct { + HREF string `xml:"href,attr"` + ID string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + Rel string `xml:"rel,attr"` +} + +// OrgList represents a lists of Organizations +// Type: OrgType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of vCloud Director organizations. +// Since: 0.9 +type OrgList struct { + Link LinkList `xml:"Link,omitempty"` + Org []*Org `xml:"Org,omitempty"` +} + +// Org represents the user view of a vCloud Director organization. +// Type: OrgType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the user view of a vCloud Director organization. 
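+//
+// A small sketch of walking an OrgList that has already been unmarshalled
+// (orgList is an assumed *OrgList, e.g. decoded from the org list endpoint):
+//
+//   for _, org := range orgList.Org {
+//       fmt.Printf("%s (%s) enabled=%v\n", org.Name, org.HREF, org.IsEnabled)
+//   }
+//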
+// Since: 0.9 +type Org struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Description string `xml:"Description,omitempty"` + FullName string `xml:"FullName"` + IsEnabled bool `xml:"IsEnabled,omitempty"` + Link LinkList `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` +} + +// List of the users within the organization +type OrgUserList struct { + User []*Reference `xml:"UserReference,omitempty"` +} + +type OrgGroupList struct { + Group []*Reference `xml:"GroupReference,omitempty"` +} + +// List of available roles in the organization +type OrgRoleType struct { + RoleReference []*Reference `xml:"RoleReference,omitempty"` +} + +// List of available rights in the organization +type RightsType struct { + Links LinkList `xml:"Link,omitempty"` + RightReference []*Reference `xml:"RightReference,omitempty"` +} + +// AdminOrg represents the admin view of a vCloud Director organization. +// Type: AdminOrgType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the admin view of a vCloud Director organization. +// Since: 0.9 +type AdminOrg struct { + XMLName xml.Name `xml:"AdminOrg"` + Xmlns string `xml:"xmlns,attr"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Description string `xml:"Description,omitempty"` + FullName string `xml:"FullName"` + IsEnabled bool `xml:"IsEnabled,omitempty"` + Link LinkList `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + Users *OrgUserList `xml:"Users,omitempty"` + Groups *OrgGroupList `xml:"Groups,omitempty"` + Catalogs *CatalogsList `xml:"Catalogs,omitempty"` + OrgSettings *OrgSettings `xml:"Settings,omitempty"` + Vdcs *VDCList `xml:"Vdcs,omitempty"` + Networks *NetworksList `xml:"Networks,omitempty"` + RightReferences *OrgRoleType `xml:"RightReferences,omitempty"` + RoleReferences *OrgRoleType `xml:"RoleReferences,omitempty"` +} + +// OrgSettingsType represents the settings for a vCloud Director organization. +// Type: OrgSettingsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the settings of a vCloud Director organization. +// Since: 0.9 +type OrgSettings struct { + //attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + //elements + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + OrgGeneralSettings *OrgGeneralSettings `xml:"OrgGeneralSettings,omitempty"` // General Settings for the org, not-required + OrgVAppLeaseSettings *VAppLeaseSettings `xml:"VAppLeaseSettings,omitempty"` + OrgVAppTemplateSettings *VAppTemplateLeaseSettings `xml:"VAppTemplateLeaseSettings,omitempty"` // Vapp template lease settings, not required + OrgLdapSettings *OrgLdapSettingsType `xml:"OrgLdapSettings,omitempty"` //LDAP settings, not-requried, defaults to none + +} + +// OrgGeneralSettingsType represents the general settings for a vCloud Director organization. +// Type: OrgGeneralSettingsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the user view of a vCloud Director organization. 
+// Since: 0.9 +type OrgGeneralSettings struct { + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + + CanPublishCatalogs bool `xml:"CanPublishCatalogs,omitempty"` + DeployedVMQuota int `xml:"DeployedVMQuota,omitempty"` + StoredVMQuota int `xml:"StoredVmQuota,omitempty"` + UseServerBootSequence bool `xml:"UseServerBootSequence,omitempty"` + DelayAfterPowerOnSeconds int `xml:"DelayAfterPowerOnSeconds,omitempty"` +} + +// VAppTemplateLeaseSettings represents the vapp template lease settings for a vCloud Director organization. +// Type: VAppTemplateLeaseSettingsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the vapp template lease settings of a vCloud Director organization. +// Since: 0.9 +type VAppTemplateLeaseSettings struct { + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + + DeleteOnStorageLeaseExpiration *bool `xml:"DeleteOnStorageLeaseExpiration,omitempty"` + StorageLeaseSeconds *int `xml:"StorageLeaseSeconds,omitempty"` +} + +type VAppLeaseSettings struct { + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + + DeleteOnStorageLeaseExpiration *bool `xml:"DeleteOnStorageLeaseExpiration,omitempty"` + DeploymentLeaseSeconds *int `xml:"DeploymentLeaseSeconds,omitempty"` + StorageLeaseSeconds *int `xml:"StorageLeaseSeconds,omitempty"` + PowerOffOnRuntimeLeaseExpiration *bool `xml:"PowerOffOnRuntimeLeaseExpiration,omitempty"` +} + +type OrgFederationSettings struct { + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + + Enabled bool `xml:"Enabled,omitempty"` +} + +// OrgLdapSettingsType represents the ldap settings for a vCloud Director organization. +// Type: OrgLdapSettingsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the ldap settings of a vCloud Director organization. +// Since: 0.9 +type OrgLdapSettingsType struct { + XMLName xml.Name `xml:"OrgLdapSettings"` + Xmlns string `xml:"xmlns,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + + CustomUsersOu string `xml:"CustomUsersOu,omitempty"` // If OrgLdapMode is SYSTEM, specifies an LDAP attribute=value pair to use for OU (organizational unit). + OrgLdapMode string `xml:"OrgLdapMode,omitempty"` // LDAP mode you want + CustomOrgLdapSettings *CustomOrgLdapSettings `xml:"CustomOrgLdapSettings,omitempty"` // Needs to be set if user chooses custom mode +} + +// CustomOrgLdapSettings represents the custom ldap settings for a vCloud Director organization. 
+// Type: CustomOrgLdapSettingsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the custom ldap settings of a vCloud Director organization. +// Since: 0.9 +// Note. Order of these fields matter and API will error if it is changed +type CustomOrgLdapSettings struct { + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + + HostName string `xml:"HostName,omitempty"` + Port int `xml:"Port"` + IsSsl bool `xml:"IsSsl,omitempty"` + IsSslAcceptAll bool `xml:"IsSslAcceptAll,omitempty"` + SearchBase string `xml:"SearchBase,omitempty"` + Username string `xml:"UserName,omitempty"` + Password string `xml:"Password,omitempty"` + AuthenticationMechanism string `xml:"AuthenticationMechanism"` + IsGroupSearchBaseEnabled bool `xml:"IsGroupSearchBaseEnabled"` + GroupSearchBase string `xml:"GroupSearchBase,omitempty"` + ConnectorType string `xml:"ConnectorType"` // Defines LDAP service implementation type + UserAttributes *OrgLdapUserAttributes `xml:"UserAttributes"` // Defines how LDAP attributes are used when importing a user. + GroupAttributes *OrgLdapGroupAttributes `xml:"GroupAttributes"` // Defines how LDAP attributes are used when importing a group. + UseExternalKerberos bool `xml:"UseExternalKerberos"` + + Realm string `xml:"Realm,omitempty"` +} + +// OrgLdapGroupAttributes represents the ldap group attribute settings for a vCloud Director organization. +// Type: OrgLdapGroupAttributesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the ldap group attribute settings of a vCloud Director organization. +// Since: 0.9 +// Note. Order of these fields matter and API will error if it is changed +type OrgLdapGroupAttributes struct { + ObjectClass string `xml:"ObjectClass"` + ObjectIdentifier string `xml:"ObjectIdentifier"` + GroupName string `xml:"GroupName"` + Membership string `xml:"Membership"` + BackLinkIdentifier string `xml:"BackLinkIdentifier,omitempty"` + MembershipIdentifier string `xml:"MembershipIdentifier"` +} + +// OrgLdapUserAttributesType represents the ldap user attribute settings for a vCloud Director organization. +// Type: OrgLdapUserAttributesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the ldap user attribute settings of a vCloud Director organization. +// Since: 0.9 +// Note. Order of these fields matter and API will error if it is changed. +type OrgLdapUserAttributes struct { + ObjectClass string `xml:"ObjectClass"` + ObjectIdentifier string `xml:"ObjectIdentifier"` + Username string `xml:"UserName,omitempty"` + Email string `xml:"Email"` + FullName string `xml:"FullName"` + GivenName string `xml:"GivenName"` + Surname string `xml:"Surname"` + Telephone string `xml:"Telephone"` + GroupMembershipIdentifier string `xml:"GroupMembershipIdentifier"` + GroupBackLinkIdentifier string `xml:"GroupBackLinkIdentifier,omitempty"` +} + +// VDCList contains a list of references to Org VDCs +// Type: VdcListType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of organization vDCs. +// Since: 0.9 +type VDCList struct { + Vdcs []*Reference `xml:"Vdc,omitempty"` +} + +// NetworksListType contains a list of references to Org Networks +// Type: NetworksListType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of organization Networks. 
+// Since: 0.9 +type NetworksList struct { + Networks []*Reference `xml:"Network,omitempty"` +} + +// CatalogsList contains a list of references to Org Catalogs +// Type: CatalogsListType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of organization Catalogs. +// Since: 0.9 +type CatalogsList struct { + Catalog []*Reference `xml:"CatalogReference,omitempty"` +} + +// CatalogItem contains a reference to a VappTemplate or Media object and related metadata. +// Type: CatalogItemType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Contains a reference to a VappTemplate or Media object and related metadata. +// Since: 0.9 +type CatalogItem struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Size int64 `xml:"size,attr,omitempty"` + DateCreated string `xml:"DateCreated,omitempty"` + Description string `xml:"Description,omitempty"` + Entity *Entity `xml:"Entity"` + Link LinkList `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + VersionNumber int64 `xml:"VersionNumber,omitempty"` +} + +// Entity is a basic entity type in the vCloud object model. Includes a name, an optional description, and an optional list of links. +// Type: EntityType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Basic entity type in the vCloud object model. Includes a name, an optional description, and an optional list of links. +// Since: 0.9 +type Entity struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Description string `xml:"Description,omitempty"` + Link LinkList `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` +} + +// CatalogItems is a container for references to catalog items. +// Type: CatalogItemsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for references to catalog items. +// Since: 0.9 +type CatalogItems struct { + CatalogItem []*Reference `xml:"CatalogItem"` +} + +// Catalog represents the user view of a Catalog object. +// Type: CatalogType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the user view of a Catalog object. +// https://code.vmware.com/apis/287/vcloud#/doc/doc/types/CatalogType.html +// Since: 0.9 +type Catalog struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + CatalogItems []*CatalogItems `xml:"CatalogItems,omitempty"` + DateCreated string `xml:"DateCreated,omitempty"` + Description string `xml:"Description,omitempty"` + IsPublished bool `xml:"IsPublished,omitempty"` + Link LinkList `xml:"Link,omitempty"` + Owner *Owner `xml:"Owner,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + VersionNumber int64 `xml:"VersionNumber,omitempty"` +} + +// AdminCatalog represents the Admin view of a Catalog object. +// Type: AdminCatalogType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the Admin view of a Catalog object. 
+// https://code.vmware.com/apis/287/vcloud#/doc/doc/types/AdminCatalogType.html +// Since: 0.9 +type AdminCatalog struct { + Catalog + XMLName xml.Name `xml:"AdminCatalog"` + Xmlns string `xml:"xmlns,attr"` + PublishExternalCatalogParams *PublishExternalCatalogParams `xml:"PublishExternalCatalogParams,omitempty"` + CatalogStorageProfiles *CatalogStorageProfiles `xml:"CatalogStorageProfiles,omitempty"` + ExternalCatalogSubscription *ExternalCatalogSubscription `xml:"ExternalCatalogSubscriptionParams,omitempty"` + IsPublished bool `xml:"IsPublished,omitempty"` +} + +// PublishExternalCatalogParamsType represents the configuration parameters of a catalog published externally +// Type: PublishExternalCatalogParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the configuration parameters of a catalog published externally. +// Since: 5.5 +type PublishExternalCatalogParams struct { + IsCachedEnabled bool `xml:"IsCacheEnabled,omitempty"` + IsPublishedExternally bool `xml:"IsPublishedExternally,omitempty"` + Password string `xml:"Password,omitempty"` + PreserveIdentityInfoFlag bool `xml:"PreserveIdentityInfoFlag,omitempty"` + CatalogPublishedUrl string `xml:"catalogPublishedUrl,omitempty"` +} + +// ExternalCatalogSubscription represents the configuration parameters for a catalog that has an external subscription +// Type: ExternalCatalogSubscriptionParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the configuration parameters for a catalog that has an external subscription. +// Since: 5.5 +type ExternalCatalogSubscription struct { + ExpectedSslThumbprint bool `xml:"ExpectedSslThumbprint,omitempty"` + LocalCopy bool `xml:"LocalCopy,omitempty"` + Password string `xml:"Password,omitempty"` + SubscribeToExternalFeeds bool `xml:"SubscribeToExternalFeeds,omitempty"` + Location string `xml:"Location,omitempty"` +} + +// CatalogStorageProfiles represents a container for storage profiles used by this catalog +// Type: CatalogStorageProfiles +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a container for storage profiles used by this catalog +// Since: 5.5 +type CatalogStorageProfiles struct { + VdcStorageProfile []*Reference `xml:"VdcStorageProfile,omitempty"` +} + +// Owner represents the owner of this entity. +// Type: OwnerType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the owner of this entity. +// Since: 1.5 +type Owner struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link LinkList `xml:"Link,omitempty"` + User *Reference `xml:"User"` +} + +// Error is the standard error message type used in the vCloud REST API. +// Type: ErrorType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: The standard error message type used in the vCloud REST API. +// Since: 0.9 +type Error struct { + Message string `xml:"message,attr"` + MajorErrorCode int `xml:"majorErrorCode,attr"` + MinorErrorCode string `xml:"minorErrorCode,attr"` + VendorSpecificErrorCode string `xml:"vendorSpecificErrorCode,attr,omitempty"` + StackTrace string `xml:"stackTrace,attr,omitempty"` +} + +func (err Error) Error() string { + return fmt.Sprintf("API Error: %d: %s", err.MajorErrorCode, err.Message) +} + +// NSXError is the standard error message type used in the NSX API which is proxied by vCD. +// It has attached method `Error() string` and implements Go's default `type error` interface. 
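+//
+// A usage sketch (assuming the raw proxied NSX error body is in a []byte named
+// body):
+//
+//   nsxErr := &NSXError{}
+//   if xml.Unmarshal(body, nsxErr) == nil {
+//       return nsxErr // *NSXError satisfies error via the Error() method below
+//   }
+//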
+type NSXError struct { + XMLName xml.Name `xml:"error"` + ErrorCode string `xml:"errorCode"` + Details string `xml:"details"` + ModuleName string `xml:"moduleName"` +} + +// Error method implements Go's default `error` interface for NSXError and formats NSX error +// output for human readable output. +func (nsxErr NSXError) Error() string { + return fmt.Sprintf("%s %s (API error: %s)", nsxErr.ModuleName, nsxErr.Details, nsxErr.ErrorCode) +} + +// File represents a file to be transferred (uploaded or downloaded). +// Type: FileType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a file to be transferred (uploaded or downloaded). +// Since: 0.9 +type File struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Size int64 `xml:"size,attr,omitempty"` + BytesTransferred int64 `xml:"bytesTransferred,attr,omitempty"` + Checksum string `xml:"checksum,attr,omitempty"` + Description string `xml:"Description,omitempty"` + Link LinkList `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` +} + +// FilesList represents a list of files to be transferred (uploaded or downloaded). +// Type: FilesListType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of files to be transferred (uploaded or downloaded). +// Since: 0.9 +type FilesList struct { + File []*File `xml:"File"` +} + +// UndeployVAppParams parameters to an undeploy vApp request. +// Type: UndeployVAppParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Parameters to an undeploy vApp request. +// Since: 0.9 +type UndeployVAppParams struct { + Xmlns string `xml:"xmlns,attr"` + UndeployPowerAction string `xml:"UndeployPowerAction,omitempty"` +} + +// VmCapabilities allows you to specify certain capabilities of this virtual machine. +// Type: VmCapabilitiesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Allows you to specify certain capabilities of this virtual machine. +// Since: 5.1 +type VmCapabilities struct { + Xmlns string `xml:"xmlns,attr"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + MemoryHotAddEnabled bool `xml:"MemoryHotAddEnabled,omitempty"` + CPUHotAddEnabled bool `xml:"CpuHotAddEnabled,omitempty"` + Link LinkList `xml:"Link,omitempty"` +} + +// VMs represents a list of virtual machines. +// Type: VmsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a list of virtual machines. +// Since: 5.1 +type VMs struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link LinkList `xml:"Link,omitempty"` + VMReference []*Reference `xml:"VmReference,omitempty"` +} + +/* + * Types that are completely valid (position, comment, coverage complete) + */ + +// ComposeVAppParams represents vApp composition parameters +// Type: ComposeVAppParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents vApp composition parameters. +// Since: 0.9 +type ComposeVAppParams struct { + XMLName xml.Name `xml:"ComposeVAppParams"` + Ovf string `xml:"xmlns:ovf,attr"` + Xsi string `xml:"xmlns:xsi,attr"` + Xmlns string `xml:"xmlns,attr"` + // Attributes + Name string `xml:"name,attr,omitempty"` // Typically used to name or identify the subject of the request. For example, the name of the object being created or modified. 
+ Deploy bool `xml:"deploy,attr"` // True if the vApp should be deployed at instantiation. Defaults to true. + PowerOn bool `xml:"powerOn,attr"` // True if the vApp should be powered-on at instantiation. Defaults to true. + LinkedClone bool `xml:"linkedClone,attr,omitempty"` // Reserved. Unimplemented. + // Elements + Description string `xml:"Description,omitempty"` // Optional description. + VAppParent *Reference `xml:"VAppParent,omitempty"` // Reserved. Unimplemented. + InstantiationParams *InstantiationParams `xml:"InstantiationParams,omitempty"` // Instantiation parameters for the composed vApp. + SourcedItem *SourcedCompositionItemParam `xml:"SourcedItem,omitempty"` // Composition item. One of: vApp vAppTemplate VM. + AllEULAsAccepted bool `xml:"AllEULAsAccepted,omitempty"` // True confirms acceptance of all EULAs in a vApp template. Instantiation fails if this element is missing, empty, or set to false and one or more EulaSection elements are present. +} + +type ReComposeVAppParams struct { + XMLName xml.Name `xml:"RecomposeVAppParams"` + Ovf string `xml:"xmlns:ovf,attr"` + Xsi string `xml:"xmlns:xsi,attr"` + Xmlns string `xml:"xmlns,attr"` + // Attributes + Name string `xml:"name,attr,omitempty"` // Typically used to name or identify the subject of the request. For example, the name of the object being created or modified. + Deploy bool `xml:"deploy,attr"` // True if the vApp should be deployed at instantiation. Defaults to true. + PowerOn bool `xml:"powerOn,attr"` // True if the vApp should be powered-on at instantiation. Defaults to true. + LinkedClone bool `xml:"linkedClone,attr,omitempty"` // Reserved. Unimplemented. + // Elements + Description string `xml:"Description,omitempty"` // Optional description. + VAppParent *Reference `xml:"VAppParent,omitempty"` // Reserved. Unimplemented. + InstantiationParams *InstantiationParams `xml:"InstantiationParams,omitempty"` // Instantiation parameters for the composed vApp. + SourcedItem *SourcedCompositionItemParam `xml:"SourcedItem,omitempty"` // Composition item. One of: vApp vAppTemplate VM. + AllEULAsAccepted bool `xml:"AllEULAsAccepted,omitempty"` + DeleteItem *DeleteItem `xml:"DeleteItem,omitempty"` +} + +// SmallRecomposeVappParams is used to update name and description of a vApp +// Using the full definition (ReComposeVAppParams), the description can be changed but not removed +type SmallRecomposeVappParams struct { + XMLName xml.Name `xml:"RecomposeVAppParams"` + Ovf string `xml:"xmlns:ovf,attr"` + Xsi string `xml:"xmlns:xsi,attr"` + Xmlns string `xml:"xmlns,attr"` + Name string `xml:"name,attr"` + Deploy bool `xml:"deploy,attr"` + Description string `xml:"Description"` +} + +type DeleteItem struct { + HREF string `xml:"href,attr,omitempty"` +} + +// SourcedCompositionItemParam represents a vApp, vApp template or VM to include in a composed vApp. +// Type: SourcedCompositionItemParamType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp, vApp template or VM to include in a composed vApp. +// Since: 0.9 +type SourcedCompositionItemParam struct { + // Attributes + SourceDelete bool `xml:"sourceDelete,attr,omitempty"` // True if the source item should be deleted after composition is complete. + // Elements + Source *Reference `xml:"Source"` // Reference to a vApp, vApp template or virtual machine to include in the composition. Changing the name of the newly created VM by specifying name attribute is deprecated. Include VmGeneralParams element instead. 
+ VMGeneralParams *VMGeneralParams `xml:"VmGeneralParams,omitempty"` // Specify name, description, and other properties of a VM during instantiation. + VAppScopedLocalID string `xml:"VAppScopedLocalId,omitempty"` // If Source references a VM, this value provides a unique identifier for the VM in the scope of the composed vApp. + InstantiationParams *InstantiationParams `xml:"InstantiationParams,omitempty"` // If Source references a VM this can include any of the following OVF sections: VirtualHardwareSection OperatingSystemSection NetworkConnectionSection GuestCustomizationSection. + NetworkAssignment []*NetworkAssignment `xml:"NetworkAssignment,omitempty"` // If Source references a VM, this element maps a network name specified in the VM to the network name of a vApp network defined in the composed vApp. + StorageProfile *Reference `xml:"StorageProfile,omitempty"` // If Source references a VM, this element contains a reference to a storage profile to be used for the VM. The specified storage profile must exist in the organization vDC that contains the composed vApp. If not specified, the default storage profile for the vDC is used. + LocalityParams *LocalityParams `xml:"LocalityParams,omitempty"` // Represents locality parameters. Locality parameters provide a hint that may help the placement engine optimize placement of a VM and an independent a Disk so that the VM can make efficient use of the disk. + ComputePolicy *ComputePolicy `xml:"ComputePolicy,omitempty"` // accessible only from version API 33.0 +} + +// LocalityParams represents locality parameters. Locality parameters provide a hint that may help the placement engine optimize placement of a VM with respect to another VM or an independent disk. +// Type: LocalityParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents locality parameters. Locality parameters provide a hint that may help the placement engine optimize placement of a VM with respect to another VM or an independent disk. +// Since: 5.1 +type LocalityParams struct { + // Elements + ResourceEntity *Reference `xml:"ResourceEntity,omitempty"` // Reference to a Disk, or a VM. +} + +// NetworkAssignment maps a network name specified in a VM to the network name of a vApp network defined in the VApp that contains the VM +// Type: NetworkAssignmentType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Maps a network name specified in a VM to the network name of a vApp network defined in the VApp that contains the VM +// Since: 0.9 +type NetworkAssignment struct { + // Attributes + InnerNetwork string `xml:"innerNetwork,attr"` // Name of the network as specified in the VM. + ContainerNetwork string `xml:"containerNetwork,attr"` // Name of the vApp network to map to. +} + +// VMGeneralParams a set of overrides to source VM properties to apply to target VM during copying. +// Type: VmGeneralParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A set of overrides to source VM properties to apply to target VM during copying. 
+// Since: 5.6 +type VMGeneralParams struct { + // Elements + Name string `xml:"Name,omitempty"` // Name of VM + Description string `xml:"Description,omitempty"` // VM description + NeedsCustomization bool `xml:"NeedsCustomization,omitempty"` // True if this VM needs guest customization + RegenerateBiosUuid bool `xml:"RegenerateBiosUuid,omitempty"` // True if BIOS UUID of the virtual machine should be regenerated so that it is unique, and not the same as the source virtual machine's BIOS UUID. +} + +// VApp represents a vApp +// Type: VAppType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp. +// Since: 0.9 +type VApp struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + ID string `xml:"id,attr,omitempty"` // The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused. + OperationKey string `xml:"operationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations. + Name string `xml:"name,attr"` // The name of the entity. + Status int `xml:"status,attr,omitempty"` // Creation status of the resource entity. + Deployed bool `xml:"deployed,attr,omitempty"` // True if the virtual machine is deployed. + OvfDescriptorUploaded bool `xml:"ovfDescriptorUploaded,attr,omitempty"` // Read-only indicator that the OVF descriptor for this vApp has been uploaded. + // Elements + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + LeaseSettingsSection *LeaseSettingsSection `xml:"LeaseSettingsSection,omitempty"` // A reference to the lease section of the vApp + NetworkConfigSection *NetworkConfigSection `xml:"NetworkConfigSection,omitempty"` // Represents vAPP network configuration + Description string `xml:"Description,omitempty"` // Optional description. + Tasks *TasksInProgress `xml:"Tasks,omitempty"` // A list of queued, running, or recently completed tasks associated with this entity. + Files *FilesList `xml:"Files,omitempty"` // Represents a list of files to be transferred (uploaded or downloaded). Each File in the list is part of the ResourceEntity. + VAppParent *Reference `xml:"VAppParent,omitempty"` // Reserved. Unimplemented. + // TODO: OVF Sections to be implemented + // Section OVF_Section `xml:"Section"` + DateCreated string `xml:"DateCreated,omitempty"` // Creation date/time of the vApp. + Owner *Owner `xml:"Owner,omitempty"` // vApp owner. + IsAutoNature bool `xml:"autoNature,omitempty"` // True if the vApp is auto generated with a standalone VM + InMaintenanceMode bool `xml:"InMaintenanceMode,omitempty"` // True if this vApp is in maintenance mode. Prevents users from changing vApp metadata. + Children *VAppChildren `xml:"Children,omitempty"` // Container for virtual machines included in this vApp. 
+ ProductSection *ProductSection `xml:"ProductSection,omitempty"` +} + +type ProductSectionList struct { + XMLName xml.Name `xml:"ProductSectionList"` + Ovf string `xml:"xmlns:ovf,attr,omitempty"` + Xmlns string `xml:"xmlns,attr"` + ProductSection *ProductSection `xml:"http://schemas.dmtf.org/ovf/envelope/1 ProductSection,omitempty"` +} + +// SortByPropertyKeyName allows to sort ProductSectionList property slice by key name as the API is +// does not always return an ordered slice +func (p *ProductSectionList) SortByPropertyKeyName() { + sort.SliceStable(p.ProductSection.Property, func(i, j int) bool { + return p.ProductSection.Property[i].Key < p.ProductSection.Property[j].Key + }) +} + +type ProductSection struct { + Info string `xml:"Info,omitempty"` + Property []*Property `xml:"http://schemas.dmtf.org/ovf/envelope/1 Property,omitempty"` +} + +type Property struct { + Key string `xml:"http://schemas.dmtf.org/ovf/envelope/1 key,attr,omitempty"` + Label string `xml:"http://schemas.dmtf.org/ovf/envelope/1 Label,omitempty"` + Description string `xml:"http://schemas.dmtf.org/ovf/envelope/1 Description,omitempty"` + DefaultValue string `xml:"http://schemas.dmtf.org/ovf/envelope/1 value,attr"` + Value *Value `xml:"http://schemas.dmtf.org/ovf/envelope/1 Value,omitempty"` + Type string `xml:"http://schemas.dmtf.org/ovf/envelope/1 type,attr,omitempty"` + UserConfigurable bool `xml:"http://schemas.dmtf.org/ovf/envelope/1 userConfigurable,attr"` +} + +type Value struct { + Value string `xml:"http://schemas.dmtf.org/ovf/envelope/1 value,attr,omitempty"` +} + +type MetadataValue struct { + XMLName xml.Name `xml:"MetadataValue"` + Xsi string `xml:"xmlns:xsi,attr"` + Xmlns string `xml:"xmlns,attr"` + TypedValue *TypedValue `xml:"TypedValue"` +} + +type TypedValue struct { + XsiType string `xml:"xsi:type,attr"` + Value string `xml:"Value"` +} + +// Type: MetadataType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: User-defined metadata associated with with an object. +// Since: 1.5 +type Metadata struct { + XMLName xml.Name `xml:"Metadata"` + Xmlns string `xml:"xmlns,attr"` + HREF string `xml:"href,attr"` + Type string `xml:"type,attr,omitempty"` + Xsi string `xml:"xmlns:xsi,attr"` + Link []*Link `xml:"Link,omitempty"` + MetadataEntry []*MetadataEntry `xml:"MetadataEntry,omitempty"` +} + +// Type: MetadataEntryType +// Namespace: http://www.vmware.com/vcloud/v1.5 +type MetadataEntry struct { + Xmlns string `xml:"xmlns,attr"` + HREF string `xml:"href,attr"` + Type string `xml:"type,attr,omitempty"` + Xsi string `xml:"xmlns:xsi,attr"` + Domain string `xml:"Domain,omitempty"` // A value of SYSTEM places this MetadataEntry in the SYSTEM domain. Omit or leave empty to place this MetadataEntry in the GENERAL domain. + Key string `xml:"Key"` // An arbitrary key name. Length cannot exceed 256 UTF-8 characters. + Link []*Link `xml:"Link,omitempty"` //A reference to an entity or operation associated with this object. + TypedValue *TypedValue `xml:"TypedValue"` +} + +// VAppChildren is a container for virtual machines included in this vApp. +// Type: VAppChildrenType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for virtual machines included in this vApp. +// Since: 0.9 +type VAppChildren struct { + VM []*Vm `xml:"Vm,omitempty"` // Represents a virtual machine. +} + +// TasksInProgress is a list of queued, running, or recently completed tasks. 
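+//
+// A hedged sketch of scanning such a list for failures (the status strings are
+// the ones documented on the Task type above; tasks is an assumed
+// *TasksInProgress):
+//
+//   for _, task := range tasks.Task {
+//       if task.Status == "error" && task.Error != nil {
+//           fmt.Printf("task %s failed: %s\n", task.Operation, task.Error.Message)
+//       }
+//   }
+//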
+// Type: TasksInProgressType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: A list of queued, running, or recently completed tasks. +// Since: 0.9 +type TasksInProgress struct { + // Elements + Task []*Task `xml:"Task"` // A task. +} + +// VAppTemplateChildren is a container for virtual machines included in this vApp template. +// Type: VAppTemplateChildrenType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for virtual machines included in this vApp template. +// Since: 0.9 +type VAppTemplateChildren struct { + // Elements + VM []*VAppTemplate `xml:"Vm"` // Represents a virtual machine in this vApp template. +} + +// VAppTemplate represents a vApp template. +// Type: VAppTemplateType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a vApp template. +// Since: 0.9 +type VAppTemplate struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + ID string `xml:"id,attr,omitempty"` // The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused. + OperationKey string `xml:"operationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations. + Name string `xml:"name,attr"` // The name of the entity. + Status int `xml:"status,attr,omitempty"` // Creation status of the resource entity. + OvfDescriptorUploaded string `xml:"ovfDescriptorUploaded,attr,omitempty"` // True if the OVF descriptor for this template has been uploaded. + GoldMaster bool `xml:"goldMaster,attr,omitempty"` // True if this template is a gold master. + // Elements + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + Description string `xml:"Description,omitempty"` // Optional description. + Tasks *TasksInProgress `xml:"Tasks,omitempty"` // A list of queued, running, or recently completed tasks associated with this entity. + Files *FilesList `xml:"Files,omitempty"` // Represents a list of files to be transferred (uploaded or downloaded). Each File in the list is part of the ResourceEntity. + Owner *Owner `xml:"Owner,omitempty"` // vAppTemplate owner. + Children *VAppTemplateChildren `xml:"Children,omitempty"` // Container for virtual machines included in this vApp template. + VAppScopedLocalID string `xml:"VAppScopedLocalId"` // A unique identifier for the VM in the scope of the vApp template. + DefaultStorageProfile string `xml:"DefaultStorageProfile,omitempty"` // The name of the storage profile to be used for this object. The named storage profile must exist in the organization vDC that contains the object. If not specified, the default storage profile for the vDC is used. + DateCreated string `xml:"DateCreated,omitempty"` // Creation date/time of the template. + // FIXME: Upstream bug? Missing NetworkConfigSection, LeaseSettingSection and + // CustomizationSection at least, NetworkConnectionSection is required when + // using ComposeVApp action in the context of a Children VM (still + // referenced by VAppTemplateType). 
+ NetworkConfigSection *NetworkConfigSection `xml:"NetworkConfigSection,omitempty"` + NetworkConnectionSection *NetworkConnectionSection `xml:"NetworkConnectionSection,omitempty"` + LeaseSettingsSection *LeaseSettingsSection `xml:"LeaseSettingsSection,omitempty"` + CustomizationSection *CustomizationSection `xml:"CustomizationSection,omitempty"` + // OVF Section needs to be added + // Section Section `xml:"Section,omitempty"` +} + +// VMDiskChange represents a virtual machine only with Disk setting update part +type VMDiskChange struct { + XMLName xml.Name `xml:"Vm"` + Ovf string `xml:"xmlns:ovf,attr,omitempty"` + Xsi string `xml:"xmlns:xsi,attr,omitempty"` + Xmlns string `xml:"xmlns,attr,omitempty"` + + HREF string `xml:"href,attr,omitempty"` // The URI of the VM entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity - application/vnd.vmware.vcloud.vm+xml + Name string `xml:"name,attr"` // VM name + Description string `xml:"Description,omitempty"` // Optional description. + ID string `xml:"id,attr,omitempty"` // VM ID. The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused. + + VmSpecSection *VmSpecSection `xml:"VmSpecSection,omitempty"` // Container for the specification of this virtual machine. This is an alternative to using ovf:VirtualHardwareSection + ovf:OperatingSystemSection +} + +// DiskSection from Vm/VmSpecSection struct +type DiskSection struct { + DiskSettings []*DiskSettings `xml:"DiskSettings"` +} + +// DiskSettings from Vm/VmSpecSection/DiskSection struct +type DiskSettings struct { + DiskId string `xml:"DiskId,omitempty"` // Specifies a unique identifier for this disk in the scope of the corresponding VM. This element is optional when creating a VM, but if it is provided it should be unique. This element is mandatory when updating an existing disk. + SizeMb int64 `xml:"SizeMb"` // The size of the disk in MB. + UnitNumber int `xml:"UnitNumber"` // The device number on the SCSI or IDE controller of the disk. + BusNumber int `xml:"BusNumber"` // The number of the SCSI or IDE controller itself. + AdapterType string `xml:"AdapterType"` // The type of disk controller, e.g. IDE vs SCSI and if SCSI bus-logic vs LSI logic. + ThinProvisioned *bool `xml:"ThinProvisioned,omitempty"` // Specifies whether the disk storage is pre-allocated or allocated on demand. + Disk *Reference `xml:"Disk,omitempty"` // Specifies reference to a named disk. + StorageProfile *Reference `xml:"StorageProfile,omitempty"` // Specifies reference to a storage profile to be associated with the disk. + OverrideVmDefault bool `xml:"overrideVmDefault"` // Specifies that the disk storage profile overrides the VM's default storage profile. + Iops *int64 `xml:"iops,omitempty"` // Specifies the IOPS for the disk. + VirtualQuantity *int64 `xml:"VirtualQuantity,omitempty"` // The actual size of the disk. + VirtualQuantityUnit string `xml:"VirtualQuantityUnit,omitempty"` // The units in which VirtualQuantity is measured. +} + +// MediaSection from Vm/VmSpecSection struct +type MediaSection struct { + MediaSettings []*MediaSettings `xml:"MediaSettings"` +} + +// MediaSettings from Vm/VmSpecSection/MediaSection struct +type MediaSettings struct { + DeviceId string `xml:"DeviceId,omitempty"` // Describes the media device whose media mount is being specified here. This deviceId must match the RASD.InstanceID attribute in the VirtualHardwareSection of the vApp's OVF description. 
+ MediaImage *Reference `xml:"MediaImage,omitempty"` // The media image that is mounted onto the device. This property can be 'null' which represents that no media is mounted on the device. + MediaType string `xml:"MediaType,omitempty"` // Specified the type of media that is mounted onto the device. + MediaState string `xml:"MediaState,omitempty"` // Specifies the state of the media device. + UnitNumber int `xml:"UnitNumber"` // Specified the type of media that is mounted onto the device. + BusNumber int `xml:"BusNumber"` // The bus number of the media device controller. + AdapterType string `xml:"AdapterType,omitempty"` // The type of controller, e.g. IDE vs SCSI and if SCSI bus-logic vs LSI logic +} + +// CpuResourceMhz from Vm/VmSpecSection struct +type CpuResourceMhz struct { + Configured int64 `xml:"Configured"` // The amount of resource configured on the virtual machine. + Reservation *int64 `xml:"Reservation,omitempty"` // The amount of reservation of this resource on the underlying virtualization infrastructure. + Limit *int64 `xml:"Limit,omitempty"` // The limit for how much of this resource can be consumed on the underlying virtualization infrastructure. This is only valid when the resource allocation is not unlimited. + SharesLevel string `xml:"SharesLevel,omitempty"` // Pre-determined relative priorities according to which the non-reserved portion of this resource is made available to the virtualized workload. + Shares *int `xml:"Shares,omitempty"` // Custom priority for the resource. This field is read-only, unless the shares level is CUSTOM. +} + +// MemoryResourceMb from Vm/VmSpecSection struct +type MemoryResourceMb struct { + Configured int64 `xml:"Configured"` // The amount of resource configured on the virtual machine. + Reservation *int64 `xml:"Reservation,omitempty"` // The amount of reservation of this resource on the underlying virtualization infrastructure. + Limit *int64 `xml:"Limit,omitempty"` // The limit for how much of this resource can be consumed on the underlying virtualization infrastructure. This is only valid when the resource allocation is not unlimited. + SharesLevel string `xml:"SharesLevel,omitempty"` // Pre-determined relative priorities according to which the non-reserved portion of this resource is made available to the virtualized workload. + Shares *int `xml:"Shares,omitempty"` // Custom priority for the resource. This is a read-only, unless the share level is CUSTOM. 
+} + +// HardwareVersion from Vm/VmSpecSection struct +type HardwareVersion struct { + HREF string `xml:"href,attr"` + Type string `xml:"type,attr,omitempty"` + Value string `xml:",chardata"` +} + +// ovf:VirtualHardwareSection from Vm struct +type VirtualHardwareSection struct { + // Extends OVF Section_Type + XMLName xml.Name `xml:"VirtualHardwareSection"` + Xmlns string `xml:"vcloud,attr,omitempty"` + + Info string `xml:"Info"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Item []*VirtualHardwareItem `xml:"Item,omitempty"` +} + +// Each ovf:Item parsed from the ovf:VirtualHardwareSection +type VirtualHardwareItem struct { + XMLName xml.Name `xml:"Item"` + ResourceType int `xml:"ResourceType,omitempty"` + ResourceSubType string `xml:"ResourceSubType,omitempty"` + ElementName string `xml:"ElementName,omitempty"` + Description string `xml:"Description,omitempty"` + InstanceID int `xml:"InstanceID,omitempty"` + AutomaticAllocation bool `xml:"AutomaticAllocation,omitempty"` + Address string `xml:"Address,omitempty"` + AddressOnParent int `xml:"AddressOnParent,omitempty"` + AllocationUnits string `xml:"AllocationUnits,omitempty"` + Reservation int `xml:"Reservation,omitempty"` + VirtualQuantity int64 `xml:"VirtualQuantity,omitempty"` + Weight int `xml:"Weight,omitempty"` + CoresPerSocket int `xml:"CoresPerSocket,omitempty"` + Connection []*VirtualHardwareConnection `xml:"Connection,omitempty"` + HostResource []*VirtualHardwareHostResource `xml:"HostResource,omitempty"` + Link []*Link `xml:"Link,omitempty"` + // Reference: https://code.vmware.com/apis/287/vcloud?h=Director#/doc/doc/operations/GET-DisksRasdItemsList-vApp.html + Parent int `xml:"Parent,omitempty"` +} + +// Connection info from ResourceType=10 (Network Interface) +type VirtualHardwareConnection struct { + IPAddress string `xml:"ipAddress,attr,omitempty"` + PrimaryConnection bool `xml:"primaryNetworkConnection,attr,omitempty"` + IpAddressingMode string `xml:"ipAddressingMode,attr,omitempty"` + NetworkName string `xml:",chardata"` +} + +// HostResource info from ResourceType=17 (Hard Disk) +// Reference: vCloud API Programming Guide for Service Providers vCloud API 30.0, Page 188 - 189 +// https://vdc-download.vmware.com/vmwb-repository/dcr-public/1b6cf07d-adb3-4dba-8c47-9c1c92b04857/ +// def8435d-a54a-4923-b26a-e2d1915b09c3/vcloud_sp_api_guide_30_0.pdf +type VirtualHardwareHostResource struct { + BusType int `xml:"busType,attr,omitempty"` + BusSubType string `xml:"busSubType,attr,omitempty"` + Capacity int `xml:"capacity,attr,omitempty"` + StorageProfile string `xml:"storageProfileHref,attr,omitempty"` + OverrideVmDefault bool `xml:"storageProfileOverrideVmDefault,attr,omitempty"` + Disk string `xml:"disk,attr,omitempty"` + //Iops int `xml:"iops,attr,omitempty"` + //OsType string `xml:"osType,attr,omitempty"` +} + +// SnapshotSection from Vm struct +type SnapshotSection struct { + // Extends OVF Section_Type + XMLName xml.Name `xml:"SnapshotSection"` + Info string `xml:"Info"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Snapshot []*SnapshotItem `xml:"Snapshot,omitempty"` +} + +// Each snapshot listed in the SnapshotSection +type SnapshotItem struct { + Created string `xml:"created,attr,omitempty"` + PoweredOn bool `xml:"poweredOn,attr,omitempty"` + Size int `xml:"size,attr,omitempty"` +} + +// OVFItem is a horrible kludge to process OVF, needs to be fixed with proper types. 
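+//
+// A hedged sketch of the kind of RASD item this is used for, here a memory
+// setting; the namespace URLs and RASD values follow common OVF conventions
+// and are assumptions, not taken from this file:
+//
+//   item := &OVFItem{
+//       XmlnsRasd:       "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+//       XmlnsVCloud:     "http://www.vmware.com/vcloud/v1.5",
+//       XmlnsXsi:        "http://www.w3.org/2001/XMLSchema-instance",
+//       AllocationUnits: "byte * 2^20", // MB
+//       Description:     "Memory SettingData",
+//       ElementName:     "4096 MB of memory",
+//       InstanceID:      5,
+//       ResourceType:    4, // RASD resource type for memory
+//       VirtualQuantity: 4096,
+//   }
+//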
+type OVFItem struct { + XMLName xml.Name `xml:"vcloud:Item"` + XmlnsRasd string `xml:"xmlns:rasd,attr"` + XmlnsVCloud string `xml:"xmlns:vcloud,attr"` + XmlnsXsi string `xml:"xmlns:xsi,attr"` + XmlnsVmw string `xml:"xmlns:vmw,attr,omitempty"` + VCloudHREF string `xml:"vcloud:href,attr"` + VCloudType string `xml:"vcloud:type,attr"` + AllocationUnits string `xml:"rasd:AllocationUnits"` + Description string `xml:"rasd:Description"` + ElementName string `xml:"rasd:ElementName"` + InstanceID int `xml:"rasd:InstanceID"` + Reservation int `xml:"rasd:Reservation"` + ResourceType int `xml:"rasd:ResourceType"` + VirtualQuantity int64 `xml:"rasd:VirtualQuantity"` + // Weight corresponds to Shares when used for CPU and/or memory settings + Weight int `xml:"rasd:Weight,omitempty"` + CoresPerSocket *int `xml:"vmw:CoresPerSocket,omitempty"` + Link *Link `xml:"vcloud:Link"` +} + +// DeployVAppParams are the parameters to a deploy vApp request +// Type: DeployVAppParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Parameters to a deploy vApp request. +// Since: 0.9 +type DeployVAppParams struct { + XMLName xml.Name `xml:"DeployVAppParams"` + Xmlns string `xml:"xmlns,attr"` + // Attributes + PowerOn bool `xml:"powerOn,attr"` // Used to specify whether to power on vapp on deployment, if not set default value is true. + DeploymentLeaseSeconds int `xml:"deploymentLeaseSeconds,attr,omitempty"` // Lease in seconds for deployment. A value of 0 is replaced by the organization default deploymentLeaseSeconds value. + ForceCustomization bool `xml:"forceCustomization,attr,omitempty"` // Used to specify whether to force customization on deployment, if not set default value is false +} + +// GuestCustomizationStatusSection holds information about guest customization status +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/76f491b4-679c-4e1e-8428-f813d668297a/a2555a1b-22f1-4cca-b481-2a98ab874022/doc/doc/operations/GET-GuestCustStatus.html +type GuestCustomizationStatusSection struct { + XMLName xml.Name `xml:"GuestCustomizationStatusSection"` + Xmlns string `xml:"xmlns,attr"` + + GuestCustStatus string `xml:"GuestCustStatus"` +} + +// GuestCustomizationSection represents guest customization settings +// Type: GuestCustomizationSectionType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a guest customization settings. +// Since: 1.0 +type GuestCustomizationSection struct { + // Extends OVF Section_Type + // Attributes + Ovf string `xml:"xmlns:ovf,attr,omitempty"` + Xsi string `xml:"xmlns:xsi,attr,omitempty"` + Xmlns string `xml:"xmlns,attr,omitempty"` + + HREF string `xml:"href,attr,omitempty"` // A reference to the section in URL format. + Type string `xml:"type,attr,omitempty"` // The MIME type of the section. + // FIXME: Fix the OVF section + Info string `xml:"ovf:Info"` + // Elements + Enabled *bool `xml:"Enabled,omitempty"` // True if guest customization is enabled. + ChangeSid *bool `xml:"ChangeSid,omitempty"` // True if customization can change the Windows SID of this virtual machine. + VirtualMachineID string `xml:"VirtualMachineId,omitempty"` // Virtual machine ID to apply. + JoinDomainEnabled *bool `xml:"JoinDomainEnabled,omitempty"` // True if this virtual machine can join a Windows Domain. + UseOrgSettings *bool `xml:"UseOrgSettings,omitempty"` // True if customization should use organization settings (OrgGuestPersonalizationSettings) when joining a Windows Domain. 
+ DomainName string `xml:"DomainName,omitempty"` // The name of the Windows Domain to join. + DomainUserName string `xml:"DomainUserName,omitempty"` // User name to specify when joining a Windows Domain. + DomainUserPassword string `xml:"DomainUserPassword,omitempty"` // Password to use with DomainUserName. + MachineObjectOU string `xml:"MachineObjectOU,omitempty"` // The name of the Windows Domain Organizational Unit (OU) in which the computer account for this virtual machine will be created. + AdminPasswordEnabled *bool `xml:"AdminPasswordEnabled,omitempty"` // True if guest customization can modify administrator password settings for this virtual machine. + AdminPasswordAuto *bool `xml:"AdminPasswordAuto,omitempty"` // True if the administrator password for this virtual machine should be automatically generated. + AdminPassword string `xml:"AdminPassword,omitempty"` // True if the administrator password for this virtual machine should be set to this string. (AdminPasswordAuto must be false.) + AdminAutoLogonEnabled *bool `xml:"AdminAutoLogonEnabled,omitempty"` // True if guest administrator should automatically log into this virtual machine. + AdminAutoLogonCount int `xml:"AdminAutoLogonCount,omitempty"` // Number of times administrator can automatically log into this virtual machine. In case AdminAutoLogon is set to True, this value should be between 1 and 100. Otherwise, it should be 0. + ResetPasswordRequired *bool `xml:"ResetPasswordRequired,omitempty"` // True if the administrator password for this virtual machine must be reset after first use. + CustomizationScript string `xml:"CustomizationScript,omitempty"` // Script to run on guest customization. The entire script must appear in this element. Use the XML entity to represent a newline. Unicode characters can be represented in the form &#xxxx; where xxxx is the character number. + ComputerName string `xml:"ComputerName,omitempty"` // Computer name to assign to this virtual machine. + Link LinkList `xml:"Link,omitempty"` // A link to an operation on this section. +} + +// InstantiateVAppTemplateParams represents vApp template instantiation parameters. +// Type: InstantiateVAppTemplateParamsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents vApp template instantiation parameters. +// Since: 0.9 +type InstantiateVAppTemplateParams struct { + XMLName xml.Name `xml:"InstantiateVAppTemplateParams"` + Ovf string `xml:"xmlns:ovf,attr"` + Xsi string `xml:"xmlns:xsi,attr,omitempty"` + Xmlns string `xml:"xmlns,attr"` + // Attributes + Name string `xml:"name,attr,omitempty"` // Typically used to name or identify the subject of the request. For example, the name of the object being created or modified. + Deploy bool `xml:"deploy,attr"` // True if the vApp should be deployed at instantiation. Defaults to true. + PowerOn bool `xml:"powerOn,attr"` // True if the vApp should be powered-on at instantiation. Defaults to true. + LinkedClone bool `xml:"linkedClone,attr,omitempty"` // Reserved. Unimplemented. + // Elements + Description string `xml:"Description,omitempty"` // Optional description. + VAppParent *Reference `xml:"VAppParent,omitempty"` // Reserved. Unimplemented. + InstantiationParams *InstantiationParams `xml:"InstantiationParams,omitempty"` // Instantiation parameters for the composed vApp. + Source *Reference `xml:"Source"` // A reference to a source object such as a vApp or vApp template. 
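+ // NOTE: illustrative sketch only, not part of the upstream type definitions; a minimal
+ // instantiation request might be assembled like this before marshalling (the template HREF
+ // is a made-up placeholder):
+ //
+ //   params := &InstantiateVAppTemplateParams{
+ //       Xmlns:            "http://www.vmware.com/vcloud/v1.5",
+ //       Ovf:              "http://schemas.dmtf.org/ovf/envelope/1",
+ //       Name:             "my-vapp",
+ //       Deploy:           true,
+ //       PowerOn:          false,
+ //       Source:           &Reference{HREF: "https://vcd.example.com/api/vAppTemplate/vappTemplate-xxx"},
+ //       AllEULAsAccepted: true,
+ //   }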
+ IsSourceDelete bool `xml:"IsSourceDelete,omitempty"` // Set to true to delete the source object after the operation completes. + SourcedItem *SourcedCompositionItemParam `xml:"SourcedItem,omitempty"` // Composition item. One of: vApp vAppTemplate VM. + AllEULAsAccepted bool `xml:"AllEULAsAccepted,omitempty"` // True confirms acceptance of all EULAs in a vApp template. Instantiation fails if this element is missing, empty, or set to false and one or more EulaSection elements are present. +} + +// EdgeGateway represents a gateway. +// Element: EdgeGateway +// Type: GatewayType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a gateway. +// Since: 5.1 +type EdgeGateway struct { + // Attributes + Xmlns string `xml:"xmlns,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + ID string `xml:"id,attr,omitempty"` // The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused + OperationKey string `xml:"operationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations. + Name string `xml:"name,attr"` // The name of the entity. + Status int `xml:"status,attr,omitempty"` // Creation status of the gateway. One of: 0 (The gateway is still being created) 1 (The gateway is ready) -1 (There was an error while creating the gateway). + // Elements + Link LinkList `xml:"Link,omitempty"` // A link to an operation on this section. + Description string `xml:"Description,omitempty"` // Optional description. + Tasks *TasksInProgress `xml:"Tasks,omitempty"` // A list of queued, running, or recently completed tasks associated with this entity. + Configuration *GatewayConfiguration `xml:"Configuration"` // Gateway configuration. +} + +// GatewayConfiguration is the gateway configuration +// Type: GatewayConfigurationType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Gateway Configuration. +// Since: 5.1 +type GatewayConfiguration struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + // BackwardCompatibilityMode. Default is false. If set to true, will allow users to write firewall + // rules in the old 1.5 format. The new format does not require to use direction in firewall + // rules. Also, for firewall rules to allow NAT traffic the filter is applied on the original IP + // addresses. Once set to true cannot be reverted back to false. + BackwardCompatibilityMode bool `xml:"BackwardCompatibilityMode,omitempty"` + // GatewayBackingConfig defines configuration of the vShield edge VM for this gateway. One of: + // compact, full. + GatewayBackingConfig string `xml:"GatewayBackingConfig"` + // GatewayInterfaces holds configuration for edge gateway interfaces, ip allocations, traffic + // rate limits and ip sub-allocations + GatewayInterfaces *GatewayInterfaces `xml:"GatewayInterfaces"` + // EdgeGatewayServiceConfiguration represents Gateway Features. + EdgeGatewayServiceConfiguration *GatewayFeatures `xml:"EdgeGatewayServiceConfiguration,omitempty"` + // True if this gateway is highly available. (Requires two vShield edge VMs.) + HaEnabled *bool `xml:"HaEnabled,omitempty"` + // UseDefaultRouteForDNSRelay defines if the default gateway on the external network selected + // for default route should be used as the DNS relay. 
+ UseDefaultRouteForDNSRelay *bool `xml:"UseDefaultRouteForDnsRelay,omitempty"`
+ // AdvancedNetworkingEnabled allows using NSX capabilities such as dynamic routing (BGP, OSPF),
+ // zero trust networking (DLR), enhanced VPN support (IPsec VPN, SSL VPN-Plus).
+ AdvancedNetworkingEnabled *bool `xml:"AdvancedNetworkingEnabled,omitempty"`
+ // DistributedRoutingEnabled enables distributed routing on the gateway to allow creation of
+ // many more organization VDC networks. Traffic in those networks is optimized for VM-to-VM
+ // communication.
+ DistributedRoutingEnabled *bool `xml:"DistributedRoutingEnabled,omitempty"`
+ // FipsModeEnabled ensures that any secure communication to or from the NSX Edge uses cryptographic
+ // algorithms or protocols that are allowed by United States Federal Information Processing
+ // Standards (FIPS). FIPS mode turns on the cipher suites that comply with FIPS.
+ FipsModeEnabled *bool `xml:"FipsModeEnabled,omitempty"`
+}
+
+// GatewayInterfaces is a list of Gateway Interfaces.
+// Type: GatewayInterfacesType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: A list of Gateway Interfaces.
+// Since: 5.1
+type GatewayInterfaces struct {
+ GatewayInterface []*GatewayInterface `xml:"GatewayInterface"` // Gateway Interface.
+}
+
+// GatewayInterface is a gateway interface configuration.
+// Type: GatewayInterfaceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Gateway Interface configuration.
+// Since: 5.1
+type GatewayInterface struct {
+ Name string `xml:"Name,omitempty"` // Internally generated name for the Gateway Interface.
+ DisplayName string `xml:"DisplayName,omitempty"` // Gateway Interface display name.
+ Network *Reference `xml:"Network"` // A reference to the network connected to the gateway interface.
+ InterfaceType string `xml:"InterfaceType"` // The type of interface. One of: Uplink, Internal
+ SubnetParticipation []*SubnetParticipation `xml:"SubnetParticipation,omitempty"` // Slice of subnets for IP allocations.
+ ApplyRateLimit bool `xml:"ApplyRateLimit,omitempty"` // True if rate limiting is applied on this interface.
+ InRateLimit float64 `xml:"InRateLimit,omitempty"` // Incoming rate limit expressed as Gbps.
+ OutRateLimit float64 `xml:"OutRateLimit,omitempty"` // Outgoing rate limit expressed as Gbps.
+ UseForDefaultRoute bool `xml:"UseForDefaultRoute,omitempty"` // True if this network is default route for the gateway.
+}
+
+// SortBySubnetParticipationGateway sorts the SubnetParticipation property slice by gateway
+// address
+func (g *GatewayInterface) SortBySubnetParticipationGateway() {
+ sort.SliceStable(g.SubnetParticipation, func(i, j int) bool {
+ return g.SubnetParticipation[i].Gateway < g.SubnetParticipation[j].Gateway
+ })
+}
+
+// SubnetParticipation allows choosing which subnets a gateway can be a part of
+// Type: SubnetParticipationType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Allows choosing which subnets a gateway can be part of
+// Since: 5.1
+//
+// Note. Field order is important and should not be changed as API returns errors if IPRanges come
+// before Gateway and Netmask
+type SubnetParticipation struct {
+ Gateway string `xml:"Gateway"` // Gateway for subnet
+ Netmask string `xml:"Netmask"` // Netmask for the subnet.
+ IPAddress string `xml:"IpAddress,omitempty"` // Ip Address to be assigned. Keep empty or omit element for auto assignment
+ IPRanges *IPRanges `xml:"IpRanges,omitempty"` // Range of IP addresses available for external interfaces.
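+ // NOTE: illustrative sketch only, not part of the upstream type definitions; since the API is
+ // order sensitive, Gateway and Netmask are typically set first, and the slice on the owning
+ // GatewayInterface can then be normalised (iface is a hypothetical *GatewayInterface):
+ //
+ //   iface.SubnetParticipation = append(iface.SubnetParticipation, &SubnetParticipation{
+ //       Gateway: "10.10.10.1",
+ //       Netmask: "255.255.255.0",
+ //   })
+ //   iface.SortBySubnetParticipationGateway()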
+ UseForDefaultRoute bool `xml:"UseForDefaultRoute,omitempty"` // True if this network is default route for the gateway. +} + +type EdgeGatewayServiceConfiguration struct { + XMLName xml.Name `xml:"EdgeGatewayServiceConfiguration"` + Xmlns string `xml:"xmlns,attr,omitempty"` + GatewayDhcpService *GatewayDhcpService `xml:"GatewayDhcpService,omitempty"` + FirewallService *FirewallService `xml:"FirewallService,omitempty"` + NatService *NatService `xml:"NatService,omitempty"` + GatewayIpsecVpnService *GatewayIpsecVpnService `xml:"GatewayIpsecVpnService,omitempty"` // Substitute for NetworkService. Gateway Ipsec VPN service settings +} + +// GatewayFeatures represents edge gateway services. +// Element: EdgeGatewayServiceConfiguration +// Type: GatewayFeaturesType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents edge gateway services. +// Since: 5.1 +type GatewayFeatures struct { + XMLName xml.Name + Xmlns string `xml:"xmlns,attr,omitempty"` + FirewallService *FirewallService `xml:"FirewallService,omitempty"` // Substitute for NetworkService. Firewall service settings + NatService *NatService `xml:"NatService,omitempty"` // Substitute for NetworkService. NAT service settings + GatewayDhcpService *GatewayDhcpService `xml:"GatewayDhcpService,omitempty"` // Substitute for NetworkService. Gateway DHCP service settings + GatewayIpsecVpnService *GatewayIpsecVpnService `xml:"GatewayIpsecVpnService,omitempty"` // Substitute for NetworkService. Gateway Ipsec VPN service settings + StaticRoutingService *StaticRoutingService `xml:"StaticRoutingService,omitempty"` // Substitute for NetworkService. Static Routing service settings +} + +// StaticRoutingService represents Static Routing network service. +// Type: StaticRoutingServiceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents Static Routing network service. +// Since: 1.5 +type StaticRoutingService struct { + IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag + StaticRoute []*StaticRoute `xml:"StaticRoute,omitempty"` // Details of each Static Route. +} + +// StaticRoute represents a static route entry +// Type: StaticRouteType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: +// Since: +type StaticRoute struct { + Name string `xml:"Name"` // Name for the static route. + Network string `xml:"Network"` // Network specification in CIDR. + NextHopIP string `xml:"NextHopIp"` // IP Address of Next Hop router/gateway. + Interface string `xml:"Interface,omitempty"` // Interface to use for static routing. Internal and External are the supported values. + GatewayInterface *Reference `xml:"GatewayInterface,omitempty"` // Gateway interface to which static route is bound. +} + +// VendorTemplate is information about a vendor service template. This is optional. +// Type: VendorTemplateType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Information about a vendor service template. This is optional. +// Since: 5.1 +type VendorTemplate struct { + Name string `xml:"Name"` // Name of the vendor template. This is required. + ID string `xml:"Id"` // ID of the vendor template. This is required. +} + +// GatewayIpsecVpnService represents gateway IPsec VPN service. +// Type: GatewayIpsecVpnServiceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents gateway IPsec VPN service. 
+// Since: 5.1 +type GatewayIpsecVpnService struct { + IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag + Endpoint *GatewayIpsecVpnEndpoint `xml:"Endpoint,omitempty"` // List of IPsec VPN Service Endpoints. + Tunnel []*GatewayIpsecVpnTunnel `xml:"Tunnel"` // List of IPsec VPN tunnels. +} + +// GatewayIpsecVpnEndpoint represents an IPsec VPN endpoint. +// Type: GatewayIpsecVpnEndpointType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents an IPsec VPN endpoint. +// Since: 5.1 +type GatewayIpsecVpnEndpoint struct { + Network *Reference `xml:"Network"` // External network reference. + PublicIP string `xml:"PublicIp,omitempty"` // Public IP for IPsec endpoint. +} + +// GatewayIpsecVpnTunnel represents an IPsec VPN tunnel. +// Type: GatewayIpsecVpnTunnelType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents an IPsec VPN tunnel. +// Since: 5.1 +type GatewayIpsecVpnTunnel struct { + Name string `xml:"Name"` // The name of the tunnel. + Description string `xml:"Description,omitempty"` // A description of the tunnel. + // TODO: Fix this in a better way + IpsecVpnThirdPartyPeer *IpsecVpnThirdPartyPeer `xml:"IpsecVpnThirdPartyPeer,omitempty"` // Details about the peer network. + IpsecVpnLocalPeer *IpsecVpnLocalPeer `xml:"IpsecVpnLocalPeer"` // Details about the local peer network. + PeerIPAddress string `xml:"PeerIpAddress"` // IP address of the peer endpoint. + PeerID string `xml:"PeerId"` // Id for the peer end point + LocalIPAddress string `xml:"LocalIpAddress"` // Address of the local network. + LocalID string `xml:"LocalId"` // Id for local end point + LocalSubnet []*IpsecVpnSubnet `xml:"LocalSubnet"` // List of local subnets in the tunnel. + PeerSubnet []*IpsecVpnSubnet `xml:"PeerSubnet"` // List of peer subnets in the tunnel. + SharedSecret string `xml:"SharedSecret"` // Shared secret used for authentication. + SharedSecretEncrypted bool `xml:"SharedSecretEncrypted,omitempty"` // True if shared secret is encrypted. + EncryptionProtocol string `xml:"EncryptionProtocol"` // Encryption protocol to be used. One of: AES, AES256, TRIPLEDES + Mtu int `xml:"Mtu"` // MTU for the tunnel. + IsEnabled bool `xml:"IsEnabled,omitempty"` // True if the tunnel is enabled. + IsOperational bool `xml:"IsOperational,omitempty"` // True if the tunnel is operational. + ErrorDetails string `xml:"ErrorDetails,omitempty"` // Error details of the tunnel. +} + +// IpsecVpnThirdPartyPeer represents details about a peer network +type IpsecVpnThirdPartyPeer struct { + PeerID string `xml:"PeerId,omitempty"` // Id for the peer end point +} + +// IpsecVpnThirdPartyPeer represents details about a peer network +type IpsecVpnLocalPeer struct { + ID string `xml:"Id"` // Id for the peer end point + Name string `xml:"Name"` // Name for the peer +} + +// IpsecVpnSubnet represents subnet details. +// Type: IpsecVpnSubnetType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents subnet details. +// Since: 5.1 +type IpsecVpnSubnet struct { + Name string `xml:"Name"` // Gateway Name. + Gateway string `xml:"Gateway"` // Subnet Gateway. + Netmask string `xml:"Netmask"` // Subnet Netmask. +} + +// GatewayDhcpService represents Gateway DHCP service. +// Type: GatewayDhcpServiceType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents Gateway DHCP service. 
+// Since: 5.1
+type GatewayDhcpService struct {
+ IsEnabled bool `xml:"IsEnabled,omitempty"` // Enable or disable the service using this flag
+ Pool []*DhcpPoolService `xml:"Pool,omitempty"` // A DHCP pool.
+}
+
+// DhcpPoolService represents DHCP pool service.
+// Type: DhcpPoolServiceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents DHCP pool service.
+// Since: 5.1
+type DhcpPoolService struct {
+ IsEnabled bool `xml:"IsEnabled,omitempty"` // True if this DHCP Pool is enabled.
+ Network *Reference `xml:"Network"` // Org vDC network to which the DHCP range is applicable.
+ DefaultLeaseTime int `xml:"DefaultLeaseTime,omitempty"` // Default lease period for DHCP range.
+ MaxLeaseTime int `xml:"MaxLeaseTime"` // Maximum lease period for DHCP range.
+ LowIPAddress string `xml:"LowIpAddress"` // Low IP address in DHCP range.
+ HighIPAddress string `xml:"HighIpAddress"` // High IP address in DHCP range.
+}
+
+// VMSelection represents details of a vm+nic+iptype selection.
+// Type: VmSelectionType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents details of a vm+nic+iptype selection.
+// Since: 5.1
+type VMSelection struct {
+ VAppScopedVMID string `xml:"VAppScopedVmId"` // VAppScopedVmId of VM to which this rule applies.
+ VMNicID int `xml:"VmNicId"` // VM NIC ID to which this rule applies.
+ IPType string `xml:"IpType"` // The value can be one of: assigned (the assigned internal IP will be automatically chosen), NAT (the NATed external IP will be automatically chosen).
+}
+
+// FirewallRuleProtocols holds flags for a network protocol in a firewall rule
+// Type: FirewallRuleType/Protocols
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description:
+// Since:
+type FirewallRuleProtocols struct {
+ ICMP bool `xml:"Icmp,omitempty"` // True if the rule applies to the ICMP protocol.
+ Any bool `xml:"Any,omitempty"` // True if the rule applies to any protocol.
+ TCP bool `xml:"Tcp,omitempty"` // True if the rule applies to the TCP protocol.
+ UDP bool `xml:"Udp,omitempty"` // True if the rule applies to the UDP protocol.
+ // FIXME: this is supposed to extend protocol support to all the VSM supported protocols
+ // Other string `xml:"Other,omitempty"` // Any other protocol supported by vShield Manager
+}
+
+// FirewallRule represents a firewall rule
+// Type: FirewallRuleType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a firewall rule.
+// Since: 0.9
+type FirewallRule struct {
+ ID string `xml:"Id,omitempty"` // Firewall rule identifier.
+ IsEnabled bool `xml:"IsEnabled"` // Used to enable or disable the firewall rule. Default value is true.
+ MatchOnTranslate bool `xml:"MatchOnTranslate"` // For DNATed traffic, match the firewall rules only after the destination IP is translated.
+ Description string `xml:"Description,omitempty"` // A description of the rule.
+ Policy string `xml:"Policy,omitempty"` // One of: drop (drop packets that match the rule), allow (allow packets that match the rule to pass through the firewall)
+ Protocols *FirewallRuleProtocols `xml:"Protocols,omitempty"` // Specify the protocols to which the rule should be applied.
+ IcmpSubType string `xml:"IcmpSubType,omitempty"` // ICMP subtype. One of: address-mask-request, address-mask-reply, destination-unreachable, echo-request, echo-reply, parameter-problem, redirect, router-advertisement, router-solicitation, source-quench, time-exceeded, timestamp-request, timestamp-reply, any.
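+ // NOTE: illustrative sketch only, not part of the upstream type definitions; a rule that
+ // allows HTTPS from anywhere might be built like this (all values are made-up examples):
+ //
+ //   rule := &FirewallRule{
+ //       IsEnabled:            true,
+ //       Description:          "allow-https",
+ //       Policy:               "allow",
+ //       Protocols:            &FirewallRuleProtocols{TCP: true},
+ //       DestinationPortRange: "443",
+ //       DestinationIP:        "Any",
+ //       SourcePortRange:      "Any",
+ //       SourceIP:             "Any",
+ //   }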
+ Port int `xml:"Port,omitempty"` // The port to which this rule applies. A value of -1 matches any port.
+ DestinationPortRange string `xml:"DestinationPortRange,omitempty"` // Destination port range to which this rule applies.
+ DestinationIP string `xml:"DestinationIp,omitempty"` // Destination IP address to which the rule applies. A value of Any matches any IP address.
+ DestinationVM *VMSelection `xml:"DestinationVm,omitempty"` // Details of the destination VM
+ SourcePort int `xml:"SourcePort,omitempty"` // Source port to which this rule applies. A value of -1 matches any port.
+ SourcePortRange string `xml:"SourcePortRange,omitempty"` // Source port range to which this rule applies.
+ SourceIP string `xml:"SourceIp,omitempty"` // Source IP address to which the rule applies. A value of Any matches any IP address.
+ SourceVM *VMSelection `xml:"SourceVm,omitempty"` // Details of the source VM
+ Direction string `xml:"Direction,omitempty"` // Direction of traffic to which rule applies. One of: in (rule applies to incoming traffic. This is the default value), out (rule applies to outgoing traffic).
+ EnableLogging bool `xml:"EnableLogging"` // Used to enable or disable firewall rule logging. Default value is false.
+}
+
+// FirewallService represents a network firewall service.
+// Type: FirewallServiceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a network firewall service.
+// Since:
+type FirewallService struct {
+ IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag
+ DefaultAction string `xml:"DefaultAction,omitempty"` // Default action of the firewall. One of: drop (Default. Drop packets that match the rule.), allow (Allow packets that match the rule to pass through the firewall)
+ LogDefaultAction bool `xml:"LogDefaultAction"` // Flag to enable logging for default action. Default value is false.
+ FirewallRule []*FirewallRule `xml:"FirewallRule,omitempty"` // A firewall rule.
+}
+
+// NatService represents a NAT network service.
+// Type: NatServiceType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a NAT network service.
+// Since:
+type NatService struct {
+ Xmlns string `xml:"xmlns,attr,omitempty"`
+ // Elements
+
+ IsEnabled bool `xml:"IsEnabled"` // Enable or disable the service using this flag
+ NatType string `xml:"NatType,omitempty"` // One of: ipTranslation (use IP translation), portForwarding (use port forwarding)
+ Policy string `xml:"Policy,omitempty"` // One of: allowTraffic (Allow all traffic), allowTrafficIn (Allow inbound traffic only)
+ NatRule []*NatRule `xml:"NatRule,omitempty"` // A NAT rule.
+ ExternalIP string `xml:"ExternalIp,omitempty"` // External IP address for rule.
+}
+
+// NatRule represents a NAT rule.
+// Type: NatRuleType
+// Namespace: http://www.vmware.com/vcloud/v1.5
+// Description: Represents a NAT rule.
+// Since: 0.9
+type NatRule struct {
+ Xmlns string `xml:"xmlns,attr,omitempty"`
+ // Elements
+ Description string `xml:"Description,omitempty"` // A description of the rule.
+ RuleType string `xml:"RuleType,omitempty"` // Type of NAT rule. One of: SNAT (source NAT), DNAT (destination NAT)
+ IsEnabled *bool `xml:"IsEnabled"` // Used to enable or disable the NAT rule.
+ ID string `xml:"Id,omitempty"` // NAT rule identifier.
+ GatewayNatRule *GatewayNatRule `xml:"GatewayNatRule,omitempty"` // Defines SNAT and DNAT types.
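+ // NOTE: illustrative sketch only, not part of the upstream type definitions; an edge gateway
+ // DNAT rule could be expressed roughly as follows (addresses and ports are made-up examples):
+ //
+ //   enabled := true
+ //   rule := &NatRule{
+ //       RuleType:  "DNAT",
+ //       IsEnabled: &enabled,
+ //       GatewayNatRule: &GatewayNatRule{
+ //           OriginalIP:     "203.0.113.10",
+ //           OriginalPort:   "443",
+ //           TranslatedIP:   "192.168.0.10",
+ //           TranslatedPort: "443",
+ //           Protocol:       "tcp",
+ //       },
+ //   }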
+ OneToOneBasicRule *NatOneToOneBasicRule `xml:"OneToOneBasicRule,omitempty"` // Maps one internal IP address to one external IP address. + OneToOneVMRule *NatOneToOneVMRule `xml:"OneToOneVmRule,omitempty"` // Maps one VM NIC to one external IP addresses. + PortForwardingRule *NatPortForwardingRule `xml:"PortForwardingRule,omitempty"` // Port forwarding internal to external IP addresses. + VMRule *NatVMRule `xml:"VmRule,omitempty"` // Port forwarding VM NIC to external IP addresses. +} + +// GatewayNatRule represents the SNAT and DNAT rules. +// Type: GatewayNatRuleType represents the SNAT and DNAT rules. +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the SNAT and DNAT rules. +// Since: 5.1 +type GatewayNatRule struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + // Elements + Interface *Reference `xml:"Interface,omitempty"` // Interface to which rule is applied. + OriginalIP string `xml:"OriginalIp"` // Original IP for rule. + OriginalPort string `xml:"OriginalPort,omitempty"` // Original port for rule. + TranslatedIP string `xml:"TranslatedIp"` // Translated IP for rule. + TranslatedPort string `xml:"TranslatedPort,omitempty"` // Translated port for rule. + Protocol string `xml:"Protocol,omitempty"` // Protocol for rule. + IcmpSubType string `xml:"IcmpSubType,omitempty"` // ICMP subtype. One of: address-mask-request, address-mask-reply, destination-unreachable, echo-request, echo-reply, parameter-problem, redirect, router-advertisement, router-solicitation, source-quench, time-exceeded, timestamp-request, timestamp-reply, any. +} + +// NatOneToOneBasicRule represents the NAT basic rule for one to one mapping of internal and external IP addresses from a network. +// Type: NatOneToOneBasicRuleType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the NAT basic rule for one to one mapping of internal and external IP addresses from a network. +// Since: 0.9 +type NatOneToOneBasicRule struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + // Elements + MappingMode string `xml:"MappingMode"` // One of: automatic (map IP addresses automatically), manual (map IP addresses manually using ExternalIpAddress and InternalIpAddress) + ExternalIPAddress string `xml:"ExternalIpAddress"` // External IP address to map. + InternalIPAddress string `xml:"InternalIpAddress"` // Internal IP address to map. +} + +// NatOneToOneVMRule represents the NAT rule for one to one mapping of VM NIC and external IP addresses from a network. +// Type: NatOneToOneVmRuleType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the NAT rule for one to one mapping of VM NIC and external IP addresses from a network. +// Since: 0.9 +type NatOneToOneVMRule struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + // Elements + MappingMode string `xml:"MappingMode"` // Mapping mode. + ExternalIPAddress *string `xml:"ExternalIpAddress"` // External IP address to map. + VAppScopedVMID string `xml:"VAppScopedVmId"` // VAppScopedVmId of VM to which this rule applies. + VMNicID int `xml:"VmNicId"` // VM NIC ID to which this rule applies. +} + +// NatPortForwardingRule represents the NAT rule for port forwarding between internal IP/port and external IP/port. +// Type: NatPortForwardingRuleType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the NAT rule for port forwarding between internal IP/port and external IP/port. 
+// Since: 0.9 +type NatPortForwardingRule struct { + ExternalIPAddress string `xml:"ExternalIpAddress"` // External IP address to map. + ExternalPort int `xml:"ExternalPort"` // External port to forward to. + InternalIPAddress string `xml:"InternalIpAddress"` // Internal IP address to map. + InternalPort int `xml:"InternalPort"` // Internal port to forward to. + Protocol string `xml:"Protocol,omitempty"` // Protocol to forward. One of: TCP (forward TCP packets), UDP (forward UDP packets), TCP_UDP (forward TCP and UDP packets). +} + +// NatVMRule represents the NAT rule for port forwarding between VM NIC/port and external IP/port. +// Type: NatVmRuleType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents the NAT rule for port forwarding between VM NIC/port and external IP/port. +// Since: 0.9 +type NatVMRule struct { + ExternalIPAddress string `xml:"ExternalIpAddress,omitempty"` // External IP address to map. + ExternalPort int `xml:"ExternalPort"` // External port to forward to. + VAppScopedVMID string `xml:"VAppScopedVmId"` // VAppScopedVmId of VM to which this rule applies. + VMNicID int `xml:"VmNicId"` // VM NIC ID to which this rule applies. + InternalPort int `xml:"InternalPort"` // Internal port to forward to. + Protocol string `xml:"Protocol,omitempty"` // Protocol to forward. One of: TCP (forward TCP packets), UDP (forward UDP packets), TCP_UDP (forward TCP and UDP packets). +} + +// QueryResultEdgeGatewayRecordsType is a container for query results in records format. +// Type: QueryResultRecordsType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Container for query results in records format. +// Since: 1.5 +type QueryResultEdgeGatewayRecordsType struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + Name string `xml:"name,attr,omitempty"` // The name of the entity. + Page int `xml:"page,attr,omitempty"` // Page of the result set that this container holds. The first page is page number 1. + PageSize int `xml:"pageSize,attr,omitempty"` // Page size, as a number of records or references. + Total float64 `xml:"total,attr,omitempty"` // Total number of records or references in the container. + // Elements + Link []*Link `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + EdgeGatewayRecord []*QueryResultEdgeGatewayRecordType `xml:"EdgeGatewayRecord"` // A record representing a EdgeGateway result. +} + +type QueryResultRecordsType struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + Name string `xml:"name,attr,omitempty"` // The name of the entity. + Page int `xml:"page,attr,omitempty"` // Page of the result set that this container holds. The first page is page number 1. + PageSize int `xml:"pageSize,attr,omitempty"` // Page size, as a number of records or references. + Total float64 `xml:"total,attr,omitempty"` // Total number of records or references in the container. + // Elements + Link []*Link `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + EdgeGatewayRecord []*QueryResultEdgeGatewayRecordType `xml:"EdgeGatewayRecord"` // A record representing a EdgeGateway result. + VMRecord []*QueryResultVMRecordType `xml:"VMRecord"` // A record representing a VM result. 
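+ // NOTE: illustrative sketch only, not part of the upstream type definitions; a query response
+ // body can be decoded into this container and the typed record slices read from it (body is a
+ // hypothetical []byte holding the raw XML response):
+ //
+ //   var results QueryResultRecordsType
+ //   if err := xml.Unmarshal(body, &results); err != nil {
+ //       return err
+ //   }
+ //   for _, vm := range results.VMRecord {
+ //       fmt.Printf("%s (%s)\n", vm.Name, vm.Status)
+ //   }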
+ AdminVMRecord []*QueryResultVMRecordType `xml:"AdminVMRecord"` // A record representing a Admin VM result. + VAppRecord []*QueryResultVAppRecordType `xml:"VAppRecord"` // A record representing a VApp result. + AdminVAppRecord []*QueryResultVAppRecordType `xml:"AdminVAppRecord"` // A record representing a VApp result as admin. + OrgVdcStorageProfileRecord []*QueryResultOrgVdcStorageProfileRecordType `xml:"OrgVdcStorageProfileRecord"` // A record representing storage profiles + MediaRecord []*MediaRecordType `xml:"MediaRecord"` // A record representing media + AdminMediaRecord []*MediaRecordType `xml:"AdminMediaRecord"` // A record representing Admin media + VMWProviderVdcRecord []*QueryResultVMWProviderVdcRecordType `xml:"VMWProviderVdcRecord"` // A record representing a Provider VDC result. + ProviderVdcStorageProfileRecord []*QueryResultProviderVdcStorageProfileRecordType `xml:"ProviderVdcStorageProfileRecord"` // A record representing a Provider VDC storage profile result + NetworkPoolRecord []*QueryResultNetworkPoolRecordType `xml:"NetworkPoolRecord"` // A record representing a network pool + DiskRecord []*DiskRecordType `xml:"DiskRecord"` // A record representing a independent Disk. + AdminDiskRecord []*DiskRecordType `xml:"AdminDiskRecord"` // A record representing a independent Disk. + VirtualCenterRecord []*QueryResultVirtualCenterRecordType `xml:"VirtualCenterRecord"` // A record representing a vSphere server + PortGroupRecord []*PortGroupRecordType `xml:"PortgroupRecord"` // A record representing a port group + OrgVdcNetworkRecord []*QueryResultOrgVdcNetworkRecordType `xml:"OrgVdcNetworkRecord"` // A record representing a org VDC network + CatalogRecord []*CatalogRecord `xml:"CatalogRecord"` // A record representing a catalog + AdminCatalogRecord []*CatalogRecord `xml:"AdminCatalogRecord"` // A record representing an admin catalog + CatalogItemRecord []*QueryResultCatalogItemType `xml:"CatalogItemRecord"` // A record representing a catalog item + AdminCatalogItemRecord []*QueryResultCatalogItemType `xml:"AdminCatalogItemRecord"` // A record representing an admin catalog item + VappTemplateRecord []*QueryResultVappTemplateType `xml:"VAppTemplateRecord"` // A record representing a vApp template + AdminVappTemplateRecord []*QueryResultVappTemplateType `xml:"AdminVAppTemplateRecord"` // A record representing an admin vApp template + NsxtManagerRecord []*QueryResultNsxtManagerRecordType `xml:"NsxTManagerRecord"` // A record representing NSX-T manager + OrgVdcRecord []*QueryResultOrgVdcRecordType `xml:"OrgVdcRecord"` // A record representing Org VDC + OrgVdcAdminRecord []*QueryResultOrgVdcRecordType `xml:"AdminVdcRecord"` // A record representing Org VDC +} + +// QueryResultOrgVdcRecordType represents an Org VDC record +type QueryResultOrgVdcRecordType struct { + HREF string `xml:"href,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + ComputeProviderScope string `xml:"computeProviderScope,attr,omitempty"` + NetworkProviderScope string `xml:"networkProviderScope,attr,omitempty"` + IsEnabled string `xml:"isEnabled,attr,omitempty"` + CpuAllocationMhz *int `xml:"cpuAllocationMhz,attr,omitempty"` + CpuLimitMhz *int `xml:"cpuLimitMhz,attr,omitempty"` + CpuUsedMhz *int `xml:"cpuUsedMhz,attr,omitempty"` + MemoryAllocationMB *int `xml:"memoryAllocationMB,attr,omitempty"` + MemoryLimitMB *int `xml:"memoryLimitMB,attr,omitempty"` + MemoryUsedMB *int `xml:"memoryUsedMB,attr,omitempty"` + StorageLimitMB *int `xml:"storageLimitMB,attr,omitempty"` + StorageUsedMB *int 
`xml:"storageUsedMB,attr,omitempty"` + StorageOverheadMB *int `xml:"storageOverheadMB,attr,omitempty"` + MemoryOverheadMB *int `xml:"memoryOverheadMB,attr,omitempty"` + NumberOfVApps *int `xml:"numberOfVApps,attr,omitempty"` + NumberOfUnmanagedVApps *int `xml:"numberOfUnmanagedVApps,attr,omitempty"` + NumberOfMedia *int `xml:"numberOfMedia,attr,omitempty"` + NumberOfDisks *int `xml:"numberOfDisks,attr,omitempty"` + NumberOfVAppTemplates *int `xml:"numberOfVAppTemplates,attr,omitempty"` + NumberOfStorageProfiles *int `xml:"numberOfStorageProfiles,attr,omitempty"` + NumberOfVMs *int `xml:"numberOfVMs,attr,omitempty"` + NumberOfRunningVMs *int `xml:"numberOfRunningVMs,attr,omitempty"` + NumberOfDeployedVApps *int `xml:"numberOfDeployedVApps,attr,omitempty"` + NumberOfDeployedUnmanagedVApps *int `xml:"numberOfDeployedUnmanagedVApps,attr,omitempty"` + CpuOverheadMhz *int `xml:"cpuOverheadMhz,attr,omitempty"` + OrgName string `xml:"orgName,attr,omitempty"` + AllocationModel string `xml:"allocationModel,attr,omitempty"` + VcName string `xml:"vcName,attr,omitempty"` + IsBusy string `xml:"isBusy,attr,omitempty"` + Status string `xml:"status,attr,omitempty"` + TaskStatusName string `xml:"taskStatusName,attr,omitempty"` + Task string `xml:"task,attr,omitempty"` + TaskStatus string `xml:"taskStatus,attr,omitempty"` + TaskDetails string `xml:"taskDetails,attr,omitempty"` + Metadata *Metadata `xml:"Metadata,omitempty"` + + // Admin Org VDC fields + ProviderVdcName string `xml:"providerVdcName,attr,omitempty"` + ProviderVdc string `xml:"providerVdc,attr,omitempty"` + Org string `xml:"org,attr,omitempty"` + NetworkPool string `xml:"networkPool,attr,omitempty"` + NumberOfResourcePools *int `xml:"numberOfResourcePools,attr,omitempty"` + UsedNetworksInVdc string `xml:"usedNetworksInVdc,attr,omitempty"` + IsThinProvisioned string `xml:"isThinProvisioned,attr,omitempty"` + IsFastProvisioned string `xml:"isFastProvisioned,attr,omitempty"` + NetworkProviderType string `xml:"networkProviderType,attr,omitempty"` + IsVCEnabled string `xml:"isVCEnabled,attr,omitempty"` + MemoryReservedMB *int `xml:"memoryReservedMB,attr,omitempty"` + CpuReservedMhz *int `xml:"cpuReservedMhz,attr,omitempty"` + Vc string `xml:"vc,attr,omitempty"` +} + +// QueryResultCatalogItemType represents a catalog item as query result +type QueryResultCatalogItemType struct { + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + ID string `xml:"id,attr,omitempty"` // Catalog Item ID. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. 
+ Entity string `xml:"entity,attr,omitempty"` // Entity reference or ID
+ EntityName string `xml:"entityName,attr,omitempty"` // Entity name
+ EntityType string `xml:"entityType,attr,omitempty"` // Entity type
+ Catalog string `xml:"catalog,attr,omitempty"` // Catalog reference or ID
+ CatalogName string `xml:"catalogName,attr,omitempty"` // Catalog name
+ OwnerName string `xml:"ownerName,attr,omitempty"` // Owner name
+ Owner string `xml:"owner,attr,omitempty"` // Owner reference or ID
+ IsPublished bool `xml:"isPublished,attr,omitempty"` // True if this entity is in a published catalog
+ Vdc string `xml:"vdc,attr,omitempty"` // VDC reference or ID
+ VdcName string `xml:"vdcName,attr,omitempty"` // VDC name
+ IsVdcEnabled bool `xml:"isVdcEnabled,attr,omitempty"` // True if the containing VDC is enabled
+ CreationDate string `xml:"creationDate,attr,omitempty"` // Creation date
+ IsExpired bool `xml:"isExpired,attr,omitempty"` // True if this entity is expired
+ Status string `xml:"status,attr,omitempty"` // Status
+ Name string `xml:"name,attr,omitempty"` // Catalog Item name.
+ Link *Link `xml:"Link,omitempty"`
+ Metadata *Metadata `xml:"Metadata,omitempty"`
+}
+
+// QueryResultVappTemplateType represents a vApp template as query result
+type QueryResultVappTemplateType struct {
+ HREF string `xml:"href,attr,omitempty"` // The URI of the entity.
+ ID string `xml:"id,attr,omitempty"` // vApp template ID.
+ Type string `xml:"type,attr,omitempty"` // The MIME type of the entity.
+ OwnerName string `xml:"ownerName,attr,omitempty"` // Owner name
+ CatalogName string `xml:"catalogName,attr,omitempty"` // Catalog name
+ IsPublished bool `xml:"isPublished,attr,omitempty"` // True if this entity is in a published catalog
+ Name string `xml:"name,attr,omitempty"` // vApp template name.
+ Description string `xml:"description,attr,omitempty"` // vApp template description.
+ Vdc string `xml:"vdc,attr,omitempty"` // VDC reference or ID
+ VdcName string `xml:"vdcName,attr,omitempty"` // VDC name
+ Org string `xml:"org,attr,omitempty"` // Organization reference or ID
+ CreationDate string `xml:"creationDate,attr,omitempty"` // Creation date
+ IsBusy bool `xml:"isBusy,attr,omitempty"` // True if the vApp template is busy
+ IsGoldMaster bool `xml:"isGoldMaster,attr,omitempty"` // True if the vApp template is a gold master
+ IsEnabled bool `xml:"isEnabled,attr,omitempty"` // True if the vApp template is enabled
+ Status string `xml:"status,attr,omitempty"` // Status
+ IsDeployed bool `xml:"isDeployed,attr,omitempty"` // True if this entity is deployed
+ IsExpired bool `xml:"isExpired,attr,omitempty"` // True if this entity is expired
+ StorageProfileName string `xml:"storageProfileName,attr,omitempty"` // Storage profile name
+ Version string `xml:"version,attr,omitempty"` // vApp template version
+ LastSuccessfulSync string `xml:"lastSuccessfulSync,attr,omitempty"` // Date of last successful sync
+ Link *Link `xml:"Link,omitempty"`
+ Metadata *Metadata `xml:"Metadata,omitempty"`
+}
+
+// QueryResultEdgeGatewayRecordType represents an edge gateway record as query result.
+type QueryResultEdgeGatewayRecordType struct {
+ // Attributes
+ HREF string `xml:"href,attr,omitempty"` // The URI of the entity.
+ Type string `xml:"type,attr,omitempty"` // The MIME type of the entity.
+ Name string `xml:"name,attr,omitempty"` // EdgeGateway name.
+ Vdc string `xml:"vdc,attr,omitempty"` // VDC Reference or ID + OrgVdcName string `xml:"orgVdcName,attr,omitempty"` // VDC name + NumberOfExtNetworks int `xml:"numberOfExtNetworks,attr,omitempty"` // Number of external networks connected to the edgeGateway. Yes Yes + NumberOfOrgNetworks int `xml:"numberOfOrgNetworks,attr,omitempty"` // Number of org VDC networks connected to the edgeGateway Yes Yes + IsBusy bool `xml:"isBusy,attr"` // True if this Edge Gateway is busy. Yes Yes + GatewayStatus string `xml:"gatewayStatus,attr,omitempty"` // + HaStatus string `xml:"haStatus,attr,omitempty"` // High Availability Status of the edgeGateway Yes Yes +} + +// QueryResultVMRecordType represents a VM record as query result. +type QueryResultVMRecordType struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + ID string `xml:"id,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` // VM name. + Type string `xml:"type,attr,omitempty"` // Contains the type of the resource. + ContainerName string `xml:"containerName,attr,omitempty"` // The name of the vApp or vApp template that contains this VM. + ContainerID string `xml:"container,attr,omitempty"` // The ID of the vApp or vApp template that contains this VM. + OwnerName string `xml:"ownerName,attr,omitempty"` + Owner string `xml:"owner,attr,omitempty"` + VdcHREF string `xml:"vdc,attr,omitempty"` + VAppTemplate bool `xml:"isVAppTemplate,attr,omitempty"` + Deleted bool `xml:"isDeleted,attr,omitempty"` + GuestOS string `xml:"guestOs,attr,omitempty"` + Cpus int `xml:"numberOfCpus,attr,omitempty"` + MemoryMB int `xml:"memoryMB,attr,omitempty"` + Status string `xml:"status,attr,omitempty"` + NetworkName string `xml:"networkName,attr,omitempty"` + NetworkHref string `xml:"network,attr,omitempty"` + IpAddress string `xml:"ipAddress,attr,omitempty"` // If configured, the IP Address of the VM on the primary network, otherwise empty. + Busy bool `xml:"isBusy,attr,omitempty"` + Deployed bool `xml:"isDeployed,attr,omitempty"` // True if the virtual machine is deployed. + Published bool `xml:"isPublished,attr,omitempty"` + CatalogName string `xml:"catalogName,attr,omitempty"` + HardwareVersion int `xml:"hardwareVersion,attr,omitempty"` + VmToolsStatus string `xml:"vmToolsStatus,attr,omitempty"` + MaintenanceMode bool `xml:"isInMaintenanceMode,attr,omitempty"` + AutoNature bool `xml:"isAutoNature,attr,omitempty"` // True if the parent vApp is a managed vApp + StorageProfileName string `xml:"storageProfileName,attr,omitempty"` + GcStatus string `xml:"gcStatus,attr,omitempty"` // GC status of this VM. + AutoUndeployDate string `xml:"autoUndeployDate,attr,omitempty"` + AutoDeleteDate string `xml:"autoDeleteDate,attr,omitempty"` + AutoUndeployNotified bool `xml:"isAutoUndeployNotified,attr,omitempty"` + AutoDeleteNotified bool `xml:"isAutoDeleteNotified,attr,omitempty"` + IsComputePolicyCompliant bool `xml:"isComputePolicyCompliant,attr,omitempty"` + VmSizingPolicyId string `xml:"vmSizingPolicyId,attr,omitempty"` + VmPlacementPolicyId string `xml:"vmPlacementPolicyId,attr,omitempty"` + Encrypted bool `xml:"encrypted,attr,omitempty"` + DateCreated string `xml:"dateCreated,attr,omitempty"` + TotalStorageAllocatedMb string `xml:"totalStorageAllocatedMb,attr,omitempty"` + IsExpired bool `xml:"isExpired,attr,omitempty"` + Link []*Link `xml:"Link,omitempty"` + MetaData *Metadata `xml:"Metadata,omitempty"` +} + +// QueryResultVAppRecordType represents a VM record as query result. 
+type QueryResultVAppRecordType struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Name string `xml:"name,attr"` // The name of the entity. + CreationDate string `xml:"creationDate,attr,omitempty"` // Creation date/time of the vApp. + Busy bool `xml:"isBusy,attr,omitempty"` + Deployed bool `xml:"isDeployed,attr,omitempty"` // True if the vApp is deployed. + Enabled bool `xml:"isEnabled,attr,omitempty"` + Expired bool `xml:"isExpired,attr,omitempty"` + MaintenanceMode bool `xml:"isInMaintenanceMode,attr,omitempty"` + Public bool `xml:"isPublic,attr,omitempty"` + OwnerName string `xml:"ownerName,attr,omitempty"` + Status string `xml:"status,attr,omitempty"` + VdcHREF string `xml:"vdc,attr,omitempty"` + VdcName string `xml:"vdcName,attr,omitempty"` + NumberOfVMs int `xml:"numberOfVMs,attr,omitempty"` + NumberOfCPUs int `xml:"numberOfCpus,attr,omitempty"` + CpuAllocationMhz int `xml:"cpuAllocationMhz,attr,omitempty"` + CpuAllocationInMhz int `xml:"cpuAllocationInMhz,attr,omitempty"` + StorageKB int `xml:"storageKB,attr,omitempty"` + MemoryAllocationMB int `xml:"memoryAllocationMB,attr,omitempty"` + AutoDeleteNotified bool `xml:"isAutoDeleteNotified,attr,omitempty"` + AutoUndeployNotified bool `xml:"isAutoUndeployNotified,attr,omitempty"` + VdcEnabled bool `xml:"isVdcEnabled,attr,omitempty"` + HonorBootOrder bool `xml:"honorBookOrder,attr,omitempty"` + HighestSupportedVersion int `xml:"pvdcHighestSupportedHardwareVersion,attr,omitempty"` + LowestHardwareVersion int `xml:"lowestHardwareVersionInVApp,attr,omitempty"` + TaskHREF string `xml:"task,attr,omitempty"` + TaskStatusName string `xml:"taskStatusName,attr,omitempty"` + TaskStatus string `xml:"TaskStatus,attr,omitempty"` + TaskDetails string `xml:"taskDetails,attr,omitempty"` + MetaData *Metadata `xml:"Metadata,omitempty"` +} + +// QueryResultOrgVdcStorageProfileRecordType represents a storage +// profile as query result. +type QueryResultOrgVdcStorageProfileRecordType struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Name string `xml:"name,attr,omitempty"` // Storage Profile name. + VdcHREF string `xml:"vdc,attr,omitempty"` + VdcName string `xml:"vdcName,attr,omitempty"` + IsDefaultStorageProfile bool `xml:"isDefaultStorageProfile,attr,omitempty"` + IsEnabled bool `xml:"isEnabled,attr,omitempty"` + IsVdcBusy bool `xml:"isVdcBusy,attr,omitempty"` + NumberOfConditions int `xml:"numberOfConditions,attr,omitempty"` + StorageUsedMB int `xml:"storageUsedMB,attr,omitempty"` + StorageLimitMB int `xml:"storageLimitMB,attr,omitempty"` +} + +// QueryResultVMWProviderVdcRecordType represents a Provider VDC as query result. +type QueryResultVMWProviderVdcRecordType struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Name string `xml:"name,attr,omitempty"` // Provider VDC name. 
+ Status string `xml:"status,attr,omitempty"` + IsBusy bool `xml:"isBusy,attr,omitempty"` + IsDeleted bool `xml:"isDeleted,attr,omitempty"` + IsEnabled bool `xml:"isEnabled,attr,omitempty"` + CpuAllocationMhz int `xml:"cpuAllocationMhz,attr,omitempty"` + CpuLimitMhz int `xml:"cpuLimitMhz,attr,omitempty"` + CpuUsedMhz int `xml:"cpuUsedMhz,attr,omitempty"` + NumberOfDatastores int `xml:"numberOfDatastores,attr,omitempty"` + NumberOfStorageProfiles int `xml:"numberOfStorageProfiles,attr,omitempty"` + NumberOfVdcs int `xml:"numberOfVdcs,attr,omitempty"` + MemoryAllocationMB int64 `xml:"memoryAllocationMB,attr,omitempty"` + MemoryLimitMB int64 `xml:"memoryLimitMB,attr,omitempty"` + MemoryUsedMB int64 `xml:"memoryUsedMB,attr,omitempty"` + StorageAllocationMB int64 `xml:"storageAllocationMB,attr,omitempty"` + StorageLimitMB int64 `xml:"storageLimitMB,attr,omitempty"` + StorageUsedMB int64 `xml:"storageUsedMB,attr,omitempty"` + CpuOverheadMhz int64 `xml:"cpuOverheadMhz,attr,omitempty"` + StorageOverheadMB int64 `xml:"storageOverheadMB,attr,omitempty"` + MemoryOverheadMB int64 `xml:"memoryOverheadMB,attr,omitempty"` +} + +// QueryResultProviderVdcStorageProfileRecordType represents a Provider VDC storage profile as query result. +type QueryResultProviderVdcStorageProfileRecordType struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Name string `xml:"name,attr,omitempty"` // Provider VDC Storage Profile name. + ProviderVdcHREF string `xml:"providerVdc,attr,omitempty"` + VcHREF string `xml:"vc,attr,omitempty"` + StorageProfileMoref string `xml:"storageProfileMoref,attr,omitempty"` + IsEnabled bool `xml:"isEnabled,attr,omitempty"` + StorageProvisionedMB int64 `xml:"storageProvisionedMB,attr,omitempty"` + StorageRequestedMB int64 `xml:"storageRequestedMB,attr,omitempty"` + StorageTotalMB int64 `xml:"storageTotalMB,attr,omitempty"` + StorageUsedMB int64 `xml:"storageUsedMB,attr,omitempty"` + NumberOfConditions int `xml:"numberOfConditions,attr,omitempty"` +} + +// QueryResultNetworkPoolRecordType represents a network pool as query result. +type QueryResultNetworkPoolRecordType struct { + // Attributes + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Name string `xml:"name,attr,omitempty"` // Network pool name. + IsBusy bool `xml:"isBusy,attr,omitempty"` + NetworkPoolType int `xml:"networkPoolType,attr,omitempty"` +} + +// Type: QueryResultVirtualCenterRecordType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/types/QueryResultVirtualCenterRecordType.html +// Description: Type for a single virtualCenter query result in records format. +// Since: 1.5 +type QueryResultVirtualCenterRecordType struct { + HREF string `xml:"href,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + IsBusy bool `xml:"isBusy,attr,omitempty"` + IsEnabled bool `xml:"isEnabled,attr,omitempty"` + IsSupported bool `xml:"isSupported,attr,omitempty"` + ListenerState string `xml:"listenerState,attr,omitempty"` + Status string `xml:"status,attr,omitempty"` + Url string `xml:"url,attr,omitempty"` + UserName string `xml:"userName,attr,omitempty"` + VcVersion string `xml:"vcVersion,attr,omitempty"` + UUID string `xml:"uuid,attr,omitempty"` + VsmIP string `xml:"vsmIP,attr,omitempty"` +} + +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Retrieve a list of extension objects and operations. 
+// Since: 1.0 +type Extension struct { + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. +} + +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Retrieve a list of tasks +type TasksList struct { + XMLName xml.Name `xml:"TasksList"` + Task []*Task `xml:"Task,omitempty"` +} + +type ExternalNetworkReferences struct { + ExternalNetworkReference []*ExternalNetworkReference `xml:"ExternalNetworkReference,omitempty"` // A reference to an entity or operation associated with this object. +} + +type ExternalNetworkReference struct { + HREF string `xml:"href,attr"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` +} + +// Type: VimObjectRefType +// Namespace: http://www.vmware.com/vcloud/extension/v1.5 +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/types/VimObjectRefsType.html +// Description: Represents the Managed Object Reference (MoRef) and the type of a vSphere object. +// Since: 0.9 +type VimObjectRef struct { + VimServerRef *Reference `xml:"VimServerRef"` + MoRef string `xml:"MoRef"` + VimObjectType string `xml:"VimObjectType"` +} + +// Type: VimObjectRefsType +// Namespace: http://www.vmware.com/vcloud/extension/v1.5 +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/types/VimObjectRefsType.html +// Description: List of VimObjectRef elements. +// Since: 0.9 +type VimObjectRefs struct { + VimObjectRef []*VimObjectRef `xml:"VimObjectRef"` +} + +// Type: VMWExternalNetworkType +// Namespace: http://www.vmware.com/vcloud/extension/v1.5 +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/7a028e78-bd37-4a6a-8298-9c26c7eeb9aa/09142237-dd46-4dee-8326-e07212fb63a8/doc/doc/types/VMWExternalNetworkType.html +// Description: External network type. 
+// Since: 1.0 +type ExternalNetwork struct { + XMLName xml.Name `xml:"VMWExternalNetwork"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Link []*Link `xml:"Link,omitempty"` + Description string `xml:"Description,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + Configuration *NetworkConfiguration `xml:"Configuration,omitempty"` + VimPortGroupRef *VimObjectRef `xml:"VimPortGroupRef,omitempty"` + VimPortGroupRefs *VimObjectRefs `xml:"VimPortGroupRefs,omitempty"` + VCloudExtension *VCloudExtension `xml:"VCloudExtension,omitempty"` +} + +// Type: MediaType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// https://vdc-repo.vmware.com/vmwb-repository/dcr-public/ca48e1bb-282b-4fdc-b827-649b819249ed/55142cf1-5bb8-4ab1-8d09-b84f717af5ec/doc/doc/types/MediaType.html +// Description: Represents Media image +// Since: 0.9 +type Media struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Status int64 `xml:"status,attr,omitempty"` + ImageType string `xml:"imageType,attr,omitempty"` + Size int64 `xml:"size,attr,omitempty"` + Description string `xml:"Description,omitempty"` + Files *FilesList `xml:"Files,omitempty"` + Link LinkList `xml:"Link,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + Owner *Reference `xml:"Owner,omitempty"` + Entity *Entity `xml:"Entity"` +} + +// Type: MediaRecord +// Namespace: http://www.vmware.com/vcloud/v1.5 +// https://code.vmware.com/apis/287/vcloud#/doc/doc/operations/GET-MediasFromQuery.html +// Issue that description partly matches with what is returned +// Description: Represents Media record +// Since: 1.5 +type MediaRecordType struct { + HREF string `xml:"href,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + OwnerName string `xml:"ownerName,attr,omitempty"` + CatalogName string `xml:"catalogName,attr,omitempty"` + IsPublished bool `xml:"isPublished,attr,omitempty"` + Name string `xml:"name,attr"` + Vdc string `xml:"vdc,attr,omitempty"` + VdcName string `xml:"vdcName,attr,omitempty"` + Org string `xml:"org,attr,omitempty"` + CreationDate string `xml:"creationDate,attr,omitempty"` + IsBusy bool `xml:"isBusy,attr,omitempty"` + StorageB int64 `xml:"storageB,attr,omitempty"` + Owner string `xml:"owner,attr,omitempty"` + Catalog string `xml:"catalog,attr,omitempty"` + CatalogItem string `xml:"catalogItem,attr,omitempty"` + Status string `xml:"status,attr,omitempty"` + StorageProfileName string `xml:"storageProfileName,attr,omitempty"` + Version int64 `xml:"version,attr,omitempty"` + LastSuccessfulSync string `xml:"lastSuccessfulSync,attr,omitempty"` + TaskStatusName string `xml:"taskStatusName,attr,omitempty"` + IsInCatalog bool `xml:"isInCatalog,attr,omitempty"` + Task string `xml:"task,attr,omitempty"` + IsIso bool `xml:"isIso,attr,omitempty"` + IsVdcEnabled bool `xml:"isVdcEnabled,attr,omitempty"` + TaskStatus string `xml:"taskStatus,attr,omitempty"` + TaskDetails string `xml:"taskDetails,attr,omitempty"` + Link *Link `xml:"Link,omitempty"` + Metadata *Metadata `xml:"Metadata,omitempty"` +} + +// DiskCreateParams element for create independent disk +// Reference: vCloud API 30.0 - DiskCreateParamsType +// 
https://code.vmware.com/apis/287/vcloud?h=Director#/doc/doc/types/DiskCreateParamsType.html +type DiskCreateParams struct { + XMLName xml.Name `xml:"DiskCreateParams"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Disk *Disk `xml:"Disk"` + Locality *Reference `xml:"Locality,omitempty"` + VCloudExtension *VCloudExtension `xml:"VCloudExtension,omitempty"` +} + +// Represents an independent disk +// Reference: vCloud API 30.0 - DiskType +// https://code.vmware.com/apis/287/vcloud?h=Director#/doc/doc/types/DiskType.html +type Disk struct { + XMLName xml.Name `xml:"Disk"` + Xmlns string `xml:"xmlns,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Id string `xml:"id,attr,omitempty"` + OperationKey string `xml:"operationKey,attr,omitempty"` + Name string `xml:"name,attr"` + Status int `xml:"status,attr,omitempty"` + // Size of the disk in bytes. No longer supported in API V33.0+. + // Size int64 `xml:"size,attr"` + // SizeMb is the size of disk in MB. It has replaced Size (in bytes) field as of API V33.0 + SizeMb int64 `xml:"sizeMb,attr,omitempty"` + Iops *int `xml:"iops,attr,omitempty"` + BusType string `xml:"busType,attr,omitempty"` + BusSubType string `xml:"busSubType,attr,omitempty"` + Description string `xml:"Description,omitempty"` + Files *FilesList `xml:"Files,omitempty"` + Link []*Link `xml:"Link,omitempty"` + Owner *Owner `xml:"Owner,omitempty"` + StorageProfile *Reference `xml:"StorageProfile,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` + VCloudExtension *VCloudExtension `xml:"VCloudExtension,omitempty"` +} + +// General purpose extension element +// Not related to extension services +// Reference: vCloud API 30.0 - DiskAttachOrDetachParamsType +// https://code.vmware.com/apis/287/vcloud?h=Director#/doc/doc/types/VCloudExtensionType.html +type VCloudExtension struct { + Required bool `xml:"required,attr,omitempty"` +} + +// Parameters for attaching or detaching an independent disk +// Reference: vCloud API 30.0 - DiskAttachOrDetachParamsType +// https://code.vmware.com/apis/287/vcloud?h=Director#/doc/doc/types/DiskAttachOrDetachParamsType.html +type DiskAttachOrDetachParams struct { + XMLName xml.Name `xml:"DiskAttachOrDetachParams"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Disk *Reference `xml:"Disk"` + BusNumber *int `xml:"BusNumber,omitempty"` + UnitNumber *int `xml:"UnitNumber,omitempty"` + VCloudExtension *VCloudExtension `xml:"VCloudExtension,omitempty"` +} + +// Represents a list of virtual machines +// Reference: vCloud API 30.0 - VmsType +// https://code.vmware.com/apis/287/vcloud?h=Director#/doc/doc/types/FilesListType.html +type Vms struct { + XMLName xml.Name `xml:"Vms"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Type string `xml:"type,attr"` + HREF string `xml:"href,attr"` + VmReference *Reference `xml:"VmReference,omitempty"` +} + +// Parameters for inserting and ejecting virtual media for VM as CD/DVD +// Reference: vCloud API 30.0 - MediaInsertOrEjectParamsType +// https://code.vmware.com/apis/287/vcloud#/doc/doc/types/MediaInsertOrEjectParamsType.html +type MediaInsertOrEjectParams struct { + XMLName xml.Name `xml:"MediaInsertOrEjectParams"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Media *Reference `xml:"Media"` + VCloudExtension *VCloudExtension `xml:"VCloudExtension,omitempty"` +} + +// Parameters for VM pending questions +// Reference: vCloud API 27.0 - VmPendingQuestionType +// https://code.vmware.com/apis/287/vcloud#/doc/doc/types/VmPendingQuestionType.html +type VmPendingQuestion 
struct { + XMLName xml.Name `xml:"VmPendingQuestion"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Type string `xml:"type,attr"` + HREF string `xml:"href,attr"` + Link LinkList `xml:"Link,omitempty"` + Question string `xml:"Question"` + QuestionId string `xml:"QuestionId"` + Choices []*VmQuestionAnswerChoiceType `xml:"Choices"` +} + +// Parameters for VM question answer choice +// Reference: vCloud API 27.0 - VmQuestionAnswerChoiceType +// https://code.vmware.com/apis/287/vcloud#/doc/doc/types/VmQuestionAnswerChoiceType.html +type VmQuestionAnswerChoiceType struct { + Id int `xml:"Id"` + Text string `xml:"Text,omitempty"` +} + +// Parameters for VM question answer +// Reference: vCloud API 27.0 - VmQuestionAnswerType +// https://code.vmware.com/apis/287/vcloud#/doc/doc/types/VmQuestionAnswerType.html +type VmQuestionAnswer struct { + XMLName xml.Name `xml:"VmQuestionAnswer"` + Xmlns string `xml:"xmlns,attr,omitempty"` + ChoiceId int `xml:"ChoiceId"` + QuestionId string `xml:"QuestionId"` +} + +// Represents an independent disk record +// Reference: vCloud API 27.0 - DiskType +// https://code.vmware.com/apis/287/vcloud#/doc/doc/types/QueryResultDiskRecordType.html +type DiskRecordType struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` + Id string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + Vdc string `xml:"vdc,attr,omitempty"` + // SizeB is not available in API V33.0. It is replaced by SizeMb + //SizeB int64 `xml:"sizeB,attr,omitempty"` + SizeMb int64 `xml:"sizeMb,attr,omitempty"` + DataStore string `xml:"dataStore,attr,omitempty"` + DataStoreName string `xml:"datastoreName,attr,omitempty"` + OwnerName string `xml:"ownerName,attr,omitempty"` + VdcName string `xml:"vdcName,attr,omitempty"` + Task string `xml:"task,attr,omitempty"` + StorageProfile string `xml:"storageProfile,attr,omitempty"` + StorageProfileName string `xml:"storageProfileName,attr,omitempty"` + Status string `xml:"status,attr,omitempty"` + BusType string `xml:"busType,attr,omitempty"` + BusSubType string `xml:"busSubType,attr,omitempty"` + BusTypeDesc string `xml:"busTypeDesc,attr,omitempty"` + IsAttached bool `xml:"isAttached,attr,omitempty"` + Description string `xml:"description,attr,omitempty"` + Link []*Link `xml:"Link,omitempty"` +} + +// Represents port group +// Reference: vCloud API 27.0 - Port group type +// https://code.vmware.com/apis/72/doc/doc/types/QueryResultPortgroupRecordType.html +type PortGroupRecordType struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + HREF string `xml:"href,attr,omitempty"` + Id string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + MoRef string `xml:"moref,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + PortgroupType string `xml:"portgroupType,attr,omitempty"` + Vc string `xml:"vc,attr,omitempty"` + VcName string `xml:"vcName,attr,omitempty"` + IsVCEnabled bool `xml:"isVCEnabled,attr,omitempty"` + Network string `xml:"network,attr,omitempty"` + NetworkName string `xml:"networkName,attr,omitempty"` + ScopeType int `xml:"scopeType,attr,omitempty"` // Scope of network using the portgroup(1=Global, 2=Organization, 3=vApp) + Link []*Link `xml:"Link,omitempty"` +} + +// Represents org VDC Network +// Reference: vCloud API 27.0 - Org VDC Network +// https://code.vmware.com/apis/72/doc/doc/types/QueryResultOrgVdcNetworkRecordType.html +type QueryResultOrgVdcNetworkRecordType struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + HREF string 
`xml:"href,attr,omitempty"` + Id string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + DefaultGateway string `xml:"defaultGateway,attr,omitempty"` + Netmask string `xml:"netmask,attr,omitempty"` + Dns1 string `xml:"dns1,attr,omitempty"` + Dns2 string `xml:"dns2,attr,omitempty"` + DnsSuffix string `xml:"dnsSuffix,attr,omitempty"` + LinkType int `xml:"linkType,attr,omitempty"` // 0 = direct, 1 = routed, 2 = isolated + ConnectedTo string `xml:"connectedTo,attr,omitempty"` + Vdc string `xml:"vdc,attr,omitempty"` + IsBusy bool `xml:"isBusy,attr,omitempty"` + IsShared bool `xml:"isShared,attr,omitempty"` + VdcName string `xml:"vdcName,attr,omitempty"` + IsIpScopeInherited bool `xml:"isIpScopeInherited,attr,omitempty"` + Link []*Link `xml:"Link,omitempty"` + Metadata *Metadata `xml:"Metadata,omitempty"` +} + +type QueryResultNsxtManagerRecordType struct { + Xmlns string `xml:"xmlns,attr,omitempty"` + Name string `xml:"name,attr"` + URL string `xml:"url,attr"` + HREF string `xml:"href,attr"` + Site string `xml:"site,attr"` + LocationId string `xml:"locationId,attr"` + SiteName string `xml:"siteName,attr"` + Link []*Link `xml:"Link,omitempty"` +} + +// Represents org VDC Network +// Reference: vCloud API 27.0 - Network Pool +// https://code.vmware.com/apis/72/vcloud-director#/doc/doc/types/VMWNetworkPoolType.html +type VMWNetworkPool struct { + HREF string `xml:"href,attr,omitempty"` + Id string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr"` + Status int `xml:"status,attr,omitempty"` + Description string `xml:"netmask,omitempty"` + Tasks *TasksInProgress `xml:"Tasks,omitempty"` +} + +type GroupReference struct { + GroupReference []*Reference `xml:"GroupReference,omitempty"` +} + +// Represents an org user +// Reference: vCloud API 27.0 - UserType +// https://code.vmware.com/apis/442/vcloud-director#/doc/doc/types/UserType.html +// Note that the order of fields is important. If this structure needs to change, +// the field order must be preserved. 
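+// Illustrative sketch, not part of the upstream sources: a minimal local user
+// payload could be assembled roughly as follows, assuming "org-user" is a
+// placeholder name and roleRef is a *Reference that was fetched separately:
+//
+//	user := &User{
+//		Xmlns:        "http://www.vmware.com/vcloud/v1.5",
+//		Name:         "org-user",
+//		IsEnabled:    true,
+//		ProviderType: "INTEGRATED",
+//		Password:     "change-me",
+//		Role:         roleRef,
+//	}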
+type User struct { + XMLName xml.Name `xml:"User"` + Xmlns string `xml:"xmlns,attr"` + Href string `xml:"href,attr"` + Type string `xml:"type,attr"` + ID string `xml:"id,attr"` + OperationKey string `xml:"operationKey,attr"` + Name string `xml:"name,attr"` + Links LinkList `xml:"Link,omitempty"` + Description string `xml:"Description,omitempty"` + FullName string `xml:"FullName,omitempty"` + EmailAddress string `xml:"EmailAddress,omitempty"` + Telephone string `xml:"Telephone,omitempty"` + IsEnabled bool `xml:"IsEnabled,omitempty"` + IsLocked bool `xml:"IsLocked,omitempty"` + IM string `xml:"IM,omitempty"` + NameInSource string `xml:"NameInSource,omitempty"` + IsExternal bool `xml:"IsExternal,omitempty"` + ProviderType string `xml:"ProviderType,omitempty"` + IsGroupRole bool `xml:"IsGroupRole,omitempty"` + StoredVmQuota int `xml:"StoredVmQuota,omitempty"` + DeployedVmQuota int `xml:"DeployedVmQuota,omitempty"` + Role *Reference `xml:"Role,omitempty"` + GroupReferences *GroupReference `xml:"GroupReferences,omitempty"` + Password string `xml:"Password,omitempty"` + Tasks *TasksInProgress `xml:"Tasks"` +} + +// Group represents Org group definition +type Group struct { + XMLName xml.Name `xml:"Group"` + Xmlns string `xml:"xmlns,attr"` + // Id holds ID in format urn:vcloud:group:252fe08e-ae1b-409c-9dda-a531bb1ed69a + ID string `xml:"id,attr,omitempty"` + // Href holds reference to group object + Href string `xml:"href,attr,omitempty"` + // Type holds mime type for group + Type string `xml:"type,attr"` + // Description sets description for group + Description string `xml:"Description"` + // Name of the group. Cannot be updated. + Name string `xml:"name,attr"` + // ProviderType - 'SAML', 'INTEGRATED' + ProviderType string `xml:"ProviderType"` + // Role - reference to existing role + Role *Reference `xml:"Role,omitempty"` +} + +// Type: AdminCatalogRecord +// Namespace: http://www.vmware.com/vcloud/v1.5 +// https://code.vmware.com/apis/287/vcloud#/doc/doc/types/QueryResultCatalogRecordType.html +// Issue that description partly matches with what is returned +// Description: Represents Catalog record +// Since: 1.5 +type CatalogRecord struct { + HREF string `xml:"href,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` + Description string `xml:"description,attr,omitempty"` + IsPublished bool `xml:"isPublished,attr,omitempty"` + IsShared bool `xml:"isShared,attr,omitempty"` + CreationDate string `xml:"creationDate,attr,omitempty"` + OrgName string `xml:"orgName,attr,omitempty"` + OwnerName string `xml:"ownerName,attr,omitempty"` + NumberOfVAppTemplates int64 `xml:"numberOfVAppTemplates,attr,omitempty"` + NumberOfMedia int64 `xml:"numberOfMedia,attr,omitempty"` + Owner string `xml:"owner,attr,omitempty"` + PublishSubscriptionType string `xml:"publishSubscriptionType,attr,omitempty"` + Version int64 `xml:"version,attr,omitempty"` + Status string `xml:"status,attr,omitempty"` + Link *Link `xml:"Link,omitempty"` + Metadata *Metadata `xml:"Metadata,omitempty"` +} + +type AdminCatalogRecord CatalogRecord + +// VmAffinityRule defines an affinity (or anti-affinity) rule for a group of VmReferences` +// https://code.vmware.com/apis/722/doc/doc/types/VmAffinityRuleType.html +type VmAffinityRule struct { + XMLName xml.Name `xml:"VmAffinityRule"` + Xmlns string `xml:"xmlns,attr"` + HREF string `xml:"href,attr,omitempty"` + ID string `xml:"id,attr,omitempty"` + Name string `xml:"Name"` + OperationKey string 
`xml:"OperationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations + IsEnabled *bool `xml:"IsEnabled"` // True if the affinity rule is enabled + IsMandatory *bool `xml:"IsMandatory"` // True if this affinity rule is mandatory. When a rule is mandatory, a host failover will not power on the VM if doing so would violate the rule + Polarity string `xml:"Polarity"` // The polarity of this rule. One of: Affinity, Anti-Affinity + VmReferences []*VMs `xml:"VmReferences"` // A list of VmReferences under a specific VM affinity rule. + Link []*Link `xml:"Link,omitempty"` // + VCloudExtension *VCloudExtension `xml:"VCloudExtension,omitempty"` // An optional extension element that can contain an arbitrary number of elements and attributes +} + +// VmAffinityRules defines a list of VmAffinityRule +type VmAffinityRules struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link *Link `xml:"Link,omitempty"` // + VmAffinityRule []*VmAffinityRule `xml:"VmAffinityRule,omitempty"` +} + +// ControlAccessParams specifies access controls for a resource. +type ControlAccessParams struct { + XMLName xml.Name `xml:"ControlAccessParams"` + Xmlns string `xml:"xmlns,attr"` + IsSharedToEveryone bool `xml:"IsSharedToEveryone"` // If true, the resource is shared with everyone in the organization. Defaults to false. + EveryoneAccessLevel *string `xml:"EveryoneAccessLevel,omitempty"` // If IsSharedToEveryone is true, this element must be present to specify the access level. for all members of the organization. One of: FullControl Change ReadOnly + AccessSettings *AccessSettingList `xml:"AccessSettings,omitempty"` // The access settings to be applied if IsSharedToEveryone is false. Required on create and modify if IsSharedToEveryone is false. +} + +// AccessSettingList is a tagged list of AccessSetting +type AccessSettingList struct { + AccessSetting []*AccessSetting `xml:"AccessSetting"` +} + +// LocalSubject is the user, group, or organization to which control access settings apply. +type LocalSubject struct { + HREF string `xml:"href,attr"` // Required - The URL with the full identification of the subject + Name string `xml:"name,attr"` // The name of the subject. Not needed in input, but it is returned on reading + Type string `xml:"type,attr"` // Required - The MIME type of the subject. So far, we are using users, groups, and organizations +} + +// AccessSetting controls access to the resource. +type AccessSetting struct { + XMLName xml.Name `xml:"AccessSetting"` + Subject *LocalSubject `xml:"Subject,omitempty"` // The user or group to which these settings apply. + ExternalSubject *ExternalSubject `xml:"ExternalSubject,omitempty"` // Subject existing external of VCD, to which these settings apply. + AccessLevel string `xml:"AccessLevel"` // The access level for the subject. One of: FullControl Change ReadOnly Deny (only for a VDC resource) +} + +// ExternalSubjectType is a reference to a user or group managed by an identity provider configured for use in this organization. +type ExternalSubject struct { + IdpType string `xml:"IdpType"` // The type of identity provider for example: OAUTH, SAML, LDAP etc for this SubjectID. + IsUser bool `xml:"IsUser"` // If true, SubjectID is a reference to a user defined by this organization's identity provider. If false or empty, SubjectID is a reference to a group defined by this organization's identity provider. 
+ SubjectId string `xml:"SubjectId"` // The primary key that your identity provider uses to uniquely identify the user or group referenced in SubjectId. +} + +type VdcComputePolicyReferences struct { + Xmlns string `xml:"xmlns,attr"` + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link *Link `xml:"Link,omitempty"` + VdcComputePolicyReference []*Reference `xml:"VdcComputePolicyReference,omitempty"` +} + +// Structure returned by /api/admin call +type VCloud struct { + XMLName xml.Name `xml:"VCloud"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Name string `xml:"name,attr"` + HREF string `xml:"href,attr"` + Type string `xml:"type,attr,omitempty"` + Description string `xml:"Description"` // Contains VCD version, build number and build timestamp + Link *Link `xml:"Link,omitempty"` + // TODO: Add other fields if needed + // OrganizationReferences + // ProviderVdcReferences + // RightReferences + // RoleReferences + // Networks +} + +// UpdateVdcStorageProfiles is used to add a storage profile to an Org VDC or to remove one +type UpdateVdcStorageProfiles struct { + XMLName xml.Name `xml:"UpdateVdcStorageProfiles"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Name string `xml:"name,attr"` + Description string `xml:"Description,omitempty"` + AddStorageProfile *VdcStorageProfileConfiguration `xml:"AddStorageProfile,omitempty"` + RemoveStorageProfile *Reference `xml:"RemoveStorageProfile,omitempty"` +} + +// ApiTokenRefresh contains the access token resulting from a refresh_token operation +type ApiTokenRefresh struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + RefreshToken interface{} `json:"refresh_token"` +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/vm_types.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/vm_types.go new file mode 100644 index 000000000..e19af9b19 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/types/v56/vm_types.go @@ -0,0 +1,150 @@ +/* + * Copyright 2021 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ +package types + +import "encoding/xml" + +// Vm represents a virtual machine +// Type: VmType +// Namespace: http://www.vmware.com/vcloud/v1.5 +// Description: Represents a virtual machine. +// Since: 0.9 +// This structure used to be called `VM`, and needed an XMLName to adjust the XML entity name upon marshalling. +// We have renamed it to `Vm` to remove the ambiguity and avoid XMLName conflicts when embedding this type +// into another structure. +// Now, there is no need for XMLName, as the name of the structure is the same as the XML entity +type Vm struct { + // Attributes + Ovf string `xml:"xmlns:ovf,attr,omitempty"` + Xsi string `xml:"xmlns:xsi,attr,omitempty"` + Xmlns string `xml:"xmlns,attr,omitempty"` + + HREF string `xml:"href,attr,omitempty"` // The URI of the entity. + Type string `xml:"type,attr,omitempty"` // The MIME type of the entity. + ID string `xml:"id,attr,omitempty"` // The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused + OperationKey string `xml:"operationKey,attr,omitempty"` // Optional unique identifier to support idempotent semantics for create and delete operations. + Name string `xml:"name,attr"` // The name of the entity. 
+ Status int `xml:"status,attr,omitempty"` // Creation status of the resource entity. + Deployed bool `xml:"deployed,attr,omitempty"` // True if the virtual machine is deployed. + NeedsCustomization bool `xml:"needsCustomization,attr,omitempty"` // True if this virtual machine needs customization. + NestedHypervisorEnabled bool `xml:"nestedHypervisorEnabled,attr,omitempty"` // True if hardware-assisted CPU virtualization capabilities in the host should be exposed to the guest operating system. + // Elements + Link LinkList `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + Description string `xml:"Description,omitempty"` // Optional description. + Tasks *TasksInProgress `xml:"Tasks,omitempty"` // A list of queued, running, or recently completed tasks associated with this entity. + Files *FilesList `xml:"FilesList,omitempty"` // Represents a list of files to be transferred (uploaded or downloaded). Each File in the list is part of the ResourceEntity. + VAppParent *Reference `xml:"VAppParent,omitempty"` // Reserved. Unimplemented. + // TODO: OVF Sections to be implemented + // Section OVF_Section `xml:"Section,omitempty" + DateCreated string `xml:"DateCreated,omitempty"` // Creation date/time of the vApp. + + // Section ovf:VirtualHardwareSection + VirtualHardwareSection *VirtualHardwareSection `xml:"VirtualHardwareSection,omitempty"` + + // FIXME: Upstream bug? Missing NetworkConnectionSection + NetworkConnectionSection *NetworkConnectionSection `xml:"NetworkConnectionSection,omitempty"` + + VAppScopedLocalID string `xml:"VAppScopedLocalId,omitempty"` // A unique identifier for the virtual machine in the scope of the vApp. + + Snapshots *SnapshotSection `xml:"SnapshotSection,omitempty"` + + // TODO: OVF Sections to be implemented + // Environment OVF_Environment `xml:"Environment,omitempty" + + VmSpecSection *VmSpecSection `xml:"VmSpecSection,omitempty"` + + // GuestCustomizationSection contains settings for VM customization like admin password, SID + // changes, domain join configuration, etc + GuestCustomizationSection *GuestCustomizationSection `xml:"GuestCustomizationSection,omitempty"` + + VMCapabilities *VmCapabilities `xml:"VmCapabilities,omitempty"` // Allows you to specify certain capabilities of this virtual machine. + StorageProfile *Reference `xml:"StorageProfile,omitempty"` // A reference to a storage profile to be used for this object. The specified storage profile must exist in the organization vDC that contains the object. If not specified, the default storage profile for the vDC is used. + ProductSection *ProductSection `xml:"ProductSection,omitempty"` + ComputePolicy *ComputePolicy `xml:"ComputePolicy,omitempty"` // accessible only from version API 33.0 + Media *Reference `xml:"Media,omitempty"` // Reference to the media object to insert in a new VM. +} + +// VmSpecSection from Vm struct +type VmSpecSection struct { + Modified *bool `xml:"Modified,attr,omitempty"` + Info string `xml:"ovf:Info"` + OsType string `xml:"OsType,omitempty"` // The type of the OS. This parameter may be omitted when using the VmSpec to update the contents of an existing VM. + NumCpus *int `xml:"NumCpus,omitempty"` // Number of CPUs. This parameter may be omitted when using the VmSpec to update the contents of an existing VM. + NumCoresPerSocket *int `xml:"NumCoresPerSocket,omitempty"` // Number of cores among which to distribute CPUs in this virtual machine. This parameter may be omitted when using the VmSpec to update the contents of an existing VM. 
+ CpuResourceMhz *CpuResourceMhz `xml:"CpuResourceMhz,omitempty"` // CPU compute resources. This parameter may be omitted when using the VmSpec to update the contents of an existing VM. + MemoryResourceMb *MemoryResourceMb `xml:"MemoryResourceMb"` // Memory compute resources. This parameter may be omitted when using the VmSpec to update the contents of an existing VM. + MediaSection *MediaSection `xml:"MediaSection,omitempty"` // The media devices of this VM. + DiskSection *DiskSection `xml:"DiskSection,omitempty"` // virtual disks of this VM. + HardwareVersion *HardwareVersion `xml:"HardwareVersion"` // vSphere name of Virtual Hardware Version of this VM. Example: vmx-13 - This parameter may be omitted when using the VmSpec to update the contents of an existing VM. + VmToolsVersion string `xml:"VmToolsVersion,omitempty"` // VMware tools version of this VM. + VirtualCpuType string `xml:"VirtualCpuType,omitempty"` // The capabilities settings for this VM. This parameter may be omitted when using the VmSpec to update the contents of an existing VM. + TimeSyncWithHost *bool `xml:"TimeSyncWithHost,omitempty"` // Synchronize the VM's time with the host. +} + +// RecomposeVAppParamsForEmptyVm represents a vApp structure which allows to create VM. +type RecomposeVAppParamsForEmptyVm struct { + XMLName xml.Name `xml:"RecomposeVAppParams"` + XmlnsVcloud string `xml:"xmlns,attr"` + XmlnsOvf string `xml:"xmlns:ovf,attr"` + CreateItem *CreateItem `xml:"CreateItem,omitempty"` + AllEULAsAccepted bool `xml:"AllEULAsAccepted,omitempty"` +} + +// CreateItem represents structure to create VM, part of RecomposeVAppParams structure. +type CreateItem struct { + Name string `xml:"name,attr,omitempty"` + Description string `xml:"Description,omitempty"` + GuestCustomizationSection *GuestCustomizationSection `xml:"GuestCustomizationSection,omitempty"` + NetworkConnectionSection *NetworkConnectionSection `xml:"NetworkConnectionSection,omitempty"` + VmSpecSection *VmSpecSection `xml:"VmSpecSection,omitempty"` + StorageProfile *Reference `xml:"StorageProfile,omitempty"` + ComputePolicy *ComputePolicy `xml:"ComputePolicy,omitempty"` // accessible only from version API 33.0 + BootImage *Media `xml:"Media,omitempty"` // boot image as vApp template. Href, Id and name needed. +} + +// ComputePolicy represents structure to manage VM compute polices, part of RecomposeVAppParams structure. +type ComputePolicy struct { + HREF string `xml:"href,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + Link *Link `xml:"Link,omitempty"` // A reference to an entity or operation associated with this object. + VmPlacementPolicy *Reference `xml:"VmPlacementPolicy,omitempty"` // VdcComputePolicy that defines VM's placement on a host through various affinity constraints. + VmPlacementPolicyFinal *bool `xml:"VmPlacementPolicyFinal,omitempty"` // True indicates that the placement policy cannot be removed from a VM that is instantiated with it. This value defaults to false. + VmSizingPolicy *Reference `xml:"VmSizingPolicy,omitempty"` // VdcComputePolicy that defines VM's sizing and resource allocation. + VmSizingPolicyFinal *bool `xml:"VmSizingPolicyFinal,omitempty"` // True indicates that the sizing policy cannot be removed from a VM that is instantiated with it. This value defaults to false. 
+} + +// CreateVmParams is used to create a standalone VM without a template +type CreateVmParams struct { + XMLName xml.Name `xml:"CreateVmParams"` + XmlnsOvf string `xml:"xmlns:ovf,attr"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` // Typically used to name or identify the subject of the request. For example, the name of the object being created or modified. + PowerOn bool `xml:"powerOn,attr,omitempty"` // True if the VM should be powered-on after creation. Defaults to false. + Description string `xml:"Description,omitempty"` // Optional description + CreateVm *Vm `xml:"CreateVm"` // Read-only information about the VM to create. This information appears in the Task returned by a createVm request. + Media *Reference `xml:"Media,omitempty"` // Reference to the media object to insert in the new VM. +} + +// InstantiateVmTemplateParams is used to create a standalone VM with a template +type InstantiateVmTemplateParams struct { + XMLName xml.Name `xml:"InstantiateVmTemplateParams"` + XmlnsOvf string `xml:"xmlns:ovf,attr"` + Xmlns string `xml:"xmlns,attr,omitempty"` + Name string `xml:"name,attr,omitempty"` // Typically used to name or identify the subject of the request. For example, the name of the object being created or modified. + PowerOn bool `xml:"powerOn,attr,omitempty"` // True if the VM should be powered-on after creation. Defaults to false. + Description string `xml:"Description,omitempty"` // Optional description + SourcedVmTemplateItem *SourcedVmTemplateParams `xml:"SourcedVmTemplateItem,omitempty"` // Represents virtual machine instantiation parameters. + AllEULAsAccepted bool `xml:"AllEULAsAccepted,omitempty"` // True confirms acceptance of all EULAs in a vApp template. Instantiation fails if this element is missing, empty, or set to false and one or more EulaSection elements are present. + ComputePolicy *ComputePolicy `xml:"ComputePolicy,omitempty"` // A reference to a vdc compute policy. This contains VM's actual vdc compute policy reference and also optionally an add-on policy which always defines VM's sizing. +} + +// SourcedVmTemplateParams represents the standalone VM instantiation parameters +type SourcedVmTemplateParams struct { + LocalityParams *LocalityParams `xml:"LocalityParams,omitempty"` // Locality parameters provide a hint that may help optimize placement of a VM and an independent a Disk so that the VM can make efficient use of the disk. + Source *Reference `xml:"Source"` // A reference to an existing VM template + VmCapabilities *VmCapabilities `xml:"VmCapabilities,omitempty"` // Describes the capabilities (hot swap, etc.) the instantiated VM should have. + VmGeneralParams *VMGeneralParams `xml:"VmGeneralParams,omitempty"` // Specify name, description, and other properties of a VM during instantiation. + VmTemplateInstantiationParams *InstantiationParams `xml:"VmTemplateInstantiationParams,omitempty"` // Same as InstantiationParams used for VMs within a vApp + StorageProfile *Reference `xml:"StorageProfile,omitempty"` // A reference to a storage profile to be used for the VM. The specified storage profile must exist in the organization vDC that contains the composed vApp. If not specified, the default storage profile for the vDC is used. 
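+	// Illustrative sketch, not upstream code: a standalone VM built from a
+	// template is typically requested with a payload along these lines, where
+	// templateHref is a placeholder for the vApp template reference:
+	//
+	//	params := &InstantiateVmTemplateParams{
+	//		XmlnsOvf:         "http://schemas.dmtf.org/ovf/envelope/1",
+	//		Name:             "standalone-vm",
+	//		PowerOn:          true,
+	//		AllEULAsAccepted: true,
+	//		SourcedVmTemplateItem: &SourcedVmTemplateParams{
+	//			Source: &Reference{HREF: templateHref},
+	//		},
+	//	}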
+} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/util/LOGGING.md b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/util/LOGGING.md new file mode 100644 index 000000000..0dc017c1f --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/util/LOGGING.md @@ -0,0 +1,93 @@ +# LOGGING + + +## Defaults for logging + +Use of the standard Go `log` package is deprecated and should be avoided. +The recommended way of logging is through the logger `util.Logger`, which supports [all the functions normally available to `log`](https://golang.org/pkg/log/#Logger). + + +By default, **logging is disabled**. Any `Logger.Printf` statement will simply be discarded. + +To enable logging, you should use + +```go +util.EnableLogging = true +util.SetLog() +``` + +When enabled, the default output for logging is a file named `go-vcloud-director.log`. +The file name can be changed using + +```go +util.ApiLogFileName = "my_file_name.log" +``` + + +If you want logging on the screen, use + +```go +util.Logger.SetOutput(os.Stdout) +``` + +or + +``` +util.Logger.SetOutput(os.Stderr) +``` + +## Automatic logging of HTTP requests and responses. + +The HTTP requests and responses are automatically logged. +Since all the HTTP operations go through `NewRequest` and `decodeBody`, the logging captures the input and output of the request with calls to `util.ProcessRequestOutput` and `util.ProcessResponseOutput`. + +These two functions will show the request or response, and the function from which they were called, giving devs an useful tracking tool. + +The output of these functions can be quite large. If you want to mute the HTTP processing, you can use: + +```go +util.LogHttpRequest = false +util.LogHttpResponse = false +``` + +During the request and response processing, any password or authentication token found through pattern matching will be automatically hidden. To show passwords in your logs, use + +```go +util.LogPasswords = true +``` + +It is also possible to skip the output of the some tags (such as the result of `/versions` request,) which are quite large using + +```go +util.SetSkipTags("SupportedVersions,ovf:License") +``` + +For an even more dedicated log, you can define from which function names you want the logs, using + +```go +util.SetApiLogFunctions("FindVAppByName,GetAdminOrgByName") +``` + +## Custom logger + +If the configuration options are not enough for your needs, you can supply your own logger. + +```go +util.SetCustomLogger(mylogger) +``` + +## Environment variables + +The logging behavior can be changed without coding. 
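+For example, assuming a bash-compatible shell and an arbitrary program built
+against this library, a one-off run with full logging written to a custom,
+truncated log file could look like this (illustrative only; the variables are
+listed in the table below):
+
+```shell
+GOVCD_LOG=1 GOVCD_LOG_FILE=debug.log GOVCD_LOG_OVERWRITE=1 ./my-program
+```
+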
There are a few environment variables that are checked when the library is used: + +Variable | Corresponding environment var +--------------------------- | :------------------------------- +`EnableLogging` | `GOVCD_LOG` +`ApiLogFileName` | `GOVCD_LOG_FILE` +`LogPasswords` | `GOVCD_LOG_PASSWORDS` +`LogOnScreen` | `GOVCD_LOG_ON_SCREEN` +`LogHttpRequest` | `GOVCD_LOG_SKIP_HTTP_REQ` +`LogHttpResponse` | `GOVCD_LOG_SKIP_HTTP_RESP` +`SetSkipTags` | `GOVCD_LOG_SKIP_TAGS` +`SetApiLogFunctions` | `GOVCD_LOG_INCLUDE_FUNCTIONS` +`OverwriteLog` | `GOVCD_LOG_OVERWRITE` diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/util/logging.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/util/logging.go new file mode 100644 index 000000000..326485ee1 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/util/logging.go @@ -0,0 +1,471 @@ +/* + * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +// Package util provides ancillary functionality to go-vcloud-director library +// logging.go regulates logging for the whole library. +// See LOGGING.md for detailed usage +package util + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "path" + "regexp" + "runtime" + "strings" +) + +const ( + // Name of the environment variable that enables logging + envUseLog = "GOVCD_LOG" + + // envOverwriteLog allows to overwrite file on every initialization + envOverwriteLog = "GOVCD_LOG_OVERWRITE" + + // Name of the environment variable with the log file name + envLogFileName = "GOVCD_LOG_FILE" + + // Name of the environment variable with the screen output + envLogOnScreen = "GOVCD_LOG_ON_SCREEN" + + // Name of the environment variable that enables logging of passwords + envLogPasswords = "GOVCD_LOG_PASSWORDS" + + // Name of the environment variable that enables logging of HTTP requests + envLogSkipHttpReq = "GOVCD_LOG_SKIP_HTTP_REQ" + + // Name of the environment variable that enables logging of HTTP responses + envLogSkipHttpResp = "GOVCD_LOG_SKIP_HTTP_RESP" + + // Name of the environment variable with a custom list of of responses to skip from logging + envLogSkipTagList = "GOVCD_LOG_SKIP_TAGS" + + // Name of the environment variable with a custom list of of functions to include in the logging + envApiLogFunctions = "GOVCD_LOG_FUNCTIONS" +) + +var ( + // All go-vcloud director logging goes through this logger + Logger *log.Logger + + // It's true if we're using an user provided logger + customLogging bool = false + + // Name of the log file + // activated by GOVCD_LOG_FILE + ApiLogFileName string = "go-vcloud-director.log" + + // Globally enabling logs + // activated by GOVCD_LOG + EnableLogging bool = false + + // OverwriteLog specifies if log file should be overwritten on every run + OverwriteLog bool = false + + // Enable logging of passwords + // activated by GOVCD_LOG_PASSWORDS + LogPasswords bool = false + + // Enable logging of Http requests + // disabled by GOVCD_LOG_SKIP_HTTP_REQ + LogHttpRequest bool = true + + // Enable logging of Http responses + // disabled by GOVCD_LOG_SKIP_HTTP_RESP + LogHttpResponse bool = true + + // List of tags to be excluded from logging + skipTags = []string{"ovf:License"} + + // List of functions included in logging + // If this variable is filled, only operations from matching function names will be logged + apiLogFunctions []string + + // Sends log to screen. 
If value is either "stderr" or "err" + // logging will go to os.Stderr. For any other value it will + // go to os.Stdout + LogOnScreen string = "" + + // Flag indicating that a log file is open + // logOpen bool = false + + // PanicEmptyUserAgent will panic if Request header does not have HTTP User-Agent set This + // is generally useful in tests and is off by default. + PanicEmptyUserAgent bool = false + + // Text lines used for logging of http requests and responses + lineLength int = 80 + dashLine string = strings.Repeat("-", lineLength) + hashLine string = strings.Repeat("#", lineLength) +) + +// TogglePanicEmptyUserAgent allows to enable Panic in test if HTTP User-Agent is missing. This +// generally is useful in tests and is off by default. +func TogglePanicEmptyUserAgent(willPanic bool) { + PanicEmptyUserAgent = willPanic +} + +func newLogger(logpath string) *log.Logger { + var err error + var file *os.File + if OverwriteLog { + file, err = os.OpenFile(logpath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640) + } else { + file, err = os.OpenFile(logpath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0640) + } + + if err != nil { + fmt.Printf("error opening log file %s : %v", logpath, err) + os.Exit(1) + } + return log.New(file, "", log.Ldate|log.Ltime) +} + +func SetCustomLogger(customLogger *log.Logger) { + Logger = customLogger + EnableLogging = true + customLogging = true +} + +// initializes logging with known parameters +func SetLog() { + if customLogging { + return + } + if !EnableLogging { + Logger = log.New(ioutil.Discard, "", log.Ldate|log.Ltime) + return + } + + // If no file name was set, logging goes to the screen + if ApiLogFileName == "" { + if LogOnScreen == "stderr" || LogOnScreen == "err" { + log.SetOutput(os.Stderr) + Logger = log.New(os.Stderr, "", log.Ldate|log.Ltime) + } else { + Logger = log.New(os.Stdout, "", log.Ldate|log.Ltime) + } + } else { + Logger = newLogger(ApiLogFileName) + } + if len(skipTags) > 0 { + Logger.Printf("### WILL SKIP THE FOLLOWING TAGS: %+v", skipTags) + } + if len(apiLogFunctions) > 0 { + Logger.Printf("### WILL ONLY INCLUDE API LOGS FROM THE FOLLOWING FUNCTIONS: %+v", apiLogFunctions) + } +} + +// hideSensitive hides passwords, tokens, and certificate details +func hideSensitive(in string, onScreen bool) string { + if !onScreen && LogPasswords { + return in + } + var out string + + // Filters out the below: + // Regular passwords + re1 := regexp.MustCompile(`("[^\"]*[Pp]assword"\s*:\s*)"[^\"]+"`) + out = re1.ReplaceAllString(in, `${1}"********"`) + + // Replace password in ADFS SAML request + re2 := regexp.MustCompile(`(\s*)(.*)()`) + out = re2.ReplaceAllString(out, `${1}******${3}`) + + // Token data between + re3 := regexp.MustCompile(`(.*)(.*)(.*)`) + out = re3.ReplaceAllString(out, `${1}******${3}`) + // Token data between + re4 := regexp.MustCompile(`(.*)(.*)(.*)`) + out = re4.ReplaceAllString(out, `${1}******${3}`) + + // Data inside certificates and private keys + re5 := regexp.MustCompile(`(-----BEGIN CERTIFICATE-----)(.*)(-----END CERTIFICATE-----)`) + out = re5.ReplaceAllString(out, `${1}******${3}`) + re6 := regexp.MustCompile(`(-----BEGIN ENCRYPTED PRIVATE KEY-----)(.*)(-----END ENCRYPTED PRIVATE KEY-----)`) + out = re6.ReplaceAllString(out, `${1}******${3}`) + + // Token inside request body + re7 := regexp.MustCompile(`(refresh_token)=(\S+)`) + out = re7.ReplaceAllString(out, `${1}=*******`) + + return out +} + +// Determines whether a string is likely to contain binary data +func isBinary(data string, req *http.Request) bool { + 
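+	// Note added for clarity: this check is header-based rather than
+	// content-based. Requests advertising a Content-Range or multipart/form
+	// header, or a media+xml content type, are treated as binary uploads so
+	// that ProcessRequestOutput logs "[binary data]" instead of the payload.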
reContentRange := regexp.MustCompile(`(?i)content-range`) + reMultipart := regexp.MustCompile(`(?i)multipart/form`) + reMediaXml := regexp.MustCompile(`(?i)media+xml;`) + for key, value := range req.Header { + if reContentRange.MatchString(key) { + return true + } + if reMultipart.MatchString(key) { + return true + } + for _, v := range value { + if reMediaXml.MatchString(v) { + return true + } + } + } + return false +} + +// SanitizedHeader returns a http.Header with sensitive fields masked +func SanitizedHeader(inputHeader http.Header) http.Header { + if LogPasswords { + return inputHeader + } + var sensitiveKeys = []string{ + "Config-Secret", + "Authorization", + "X-Vcloud-Authorization", + "X-Vmware-Vcloud-Access-Token", + } + var sanitizedHeader = make(http.Header) + for key, value := range inputHeader { + // Explicitly mask only token in SIGN token so that other details are not obfuscated + // Header format: SIGN token="`+base64GzippedSignToken+`",org="`+org+`" + if (key == "authorization" || key == "Authorization") && len(value) == 1 && + strings.HasPrefix(value[0], "SIGN") && !LogPasswords { + + re := regexp.MustCompile(`(SIGN token=")([^"]*)(.*)`) + out := re.ReplaceAllString(value[0], `${1}********${3}"`) + + Logger.Printf("\t%s: %s\n", key, out) + // Do not perform any post processing on this header + continue + } + + for _, sk := range sensitiveKeys { + if strings.EqualFold(sk, key) { + value = []string{"********"} + break + } + } + sanitizedHeader[key] = value + } + return sanitizedHeader +} + +// logSanitizedHeader logs the contents of the header after sanitizing +func logSanitizedHeader(inputHeader http.Header) { + for key, value := range SanitizedHeader(inputHeader) { + Logger.Printf("\t%s: %s\n", key, value) + } +} + +// Returns true if the caller function matches any of the functions in the include function list +func includeFunction(caller string) bool { + if len(apiLogFunctions) > 0 { + for _, f := range apiLogFunctions { + reFunc := regexp.MustCompile(f) + if reFunc.MatchString(caller) { + return true + } + } + } else { + // If there is no include list, we include everything + return true + } + // If we reach this point, none of the functions in the list matches the caller name + return false +} + +// Logs the essentials of a HTTP request +func ProcessRequestOutput(caller, operation, url, payload string, req *http.Request) { + // Special behavior for testing that all requests get HTTP User-Agent set + if PanicEmptyUserAgent && req.Header.Get("User-Agent") == "" { + panic(fmt.Sprintf("empty User-Agent detected in API call to '%s'", url)) + } + + if !LogHttpRequest { + return + } + if !includeFunction(caller) { + return + } + + Logger.Printf("%s\n", dashLine) + Logger.Printf("Request caller: %s\n", caller) + Logger.Printf("%s %s\n", operation, url) + Logger.Printf("%s\n", dashLine) + dataSize := len(payload) + if isBinary(payload, req) { + payload = "[binary data]" + } + // Request header should be shown before Request data + Logger.Printf("Req header:\n") + logSanitizedHeader(req.Header) + + if dataSize > 0 { + Logger.Printf("Request data: [%d]\n%s\n", dataSize, hideSensitive(payload, false)) + } +} + +// Logs the essentials of a HTTP response +func ProcessResponseOutput(caller string, resp *http.Response, result string) { + if !LogHttpResponse { + return + } + + if !includeFunction(caller) { + return + } + + outText := result + if len(skipTags) > 0 { + for _, longTag := range skipTags { + initialTag := `<` + longTag + `.*>` + finalTag := `` + reInitialSearchTag := 
regexp.MustCompile(initialTag) + + // The `(?s)` flag treats the regular expression as a single line. + // In this context, the dot matches every character until the next operator + // The `.*?` is a non-greedy match of every character until the next operator, but + // only matching the shortest possible portion. + reSearchBothTags := regexp.MustCompile(`(?s)` + initialTag + `.*?` + finalTag) + outRepl := fmt.Sprintf("[SKIPPING '%s' TAG AT USER'S REQUEST]", longTag) + // We search for the initial long tag + if reInitialSearchTag.MatchString(outText) { + // If the first tag was found, we search the text to skip the whole output between the tags + // Notice that if the second tag is not found, there won't be any replacement + outText = reSearchBothTags.ReplaceAllString(outText, outRepl) + break + } + } + } + Logger.Printf("%s\n", hashLine) + Logger.Printf("Response caller %s\n", caller) + Logger.Printf("Response status %s\n", resp.Status) + Logger.Printf("%s\n", hashLine) + Logger.Printf("Response header:\n") + logSanitizedHeader(resp.Header) + dataSize := len(result) + outTextSize := len(outText) + if outTextSize != dataSize { + Logger.Printf("Response text: [%d -> %d]\n%s\n", dataSize, outTextSize, hideSensitive(outText, false)) + } else { + Logger.Printf("Response text: [%d]\n%s\n", dataSize, hideSensitive(outText, false)) + } +} + +// Sets the list of tags to skip +func SetSkipTags(tags string) { + if tags != "" { + skipTags = strings.Split(tags, ",") + } +} + +// Sets the list of functions to include +func SetApiLogFunctions(functions string) { + if functions != "" { + apiLogFunctions = strings.Split(functions, ",") + } +} + +// Initializes default logging values +func InitLogging() { + if os.Getenv(envLogSkipHttpReq) != "" { + LogHttpRequest = false + } + + if os.Getenv(envLogSkipHttpResp) != "" { + LogHttpResponse = false + } + + if os.Getenv(envApiLogFunctions) != "" { + SetApiLogFunctions(os.Getenv(envApiLogFunctions)) + } + + if os.Getenv(envLogSkipTagList) != "" { + SetSkipTags(os.Getenv(envLogSkipTagList)) + } + + if os.Getenv(envLogPasswords) != "" { + EnableLogging = true + LogPasswords = true + } + + if os.Getenv(envLogFileName) != "" { + EnableLogging = true + ApiLogFileName = os.Getenv(envLogFileName) + } + + LogOnScreen = os.Getenv(envLogOnScreen) + if LogOnScreen != "" { + ApiLogFileName = "" + EnableLogging = true + } + + if EnableLogging || os.Getenv(envUseLog) != "" { + EnableLogging = true + } + + if os.Getenv(envOverwriteLog) != "" { + OverwriteLog = true + } + + SetLog() +} + +func init() { + InitLogging() +} + +// Returns the name of the function that called the +// current function. 
+// Used by functions that call processResponseOutput and +// processRequestOutput +func CallFuncName() string { + fpcs := make([]uintptr, 1) + n := runtime.Callers(3, fpcs) + if n > 0 { + fun := runtime.FuncForPC(fpcs[0] - 1) + if fun != nil { + return fun.Name() + } + } + return "" +} + +// Returns the name of the current function +func CurrentFuncName() string { + fpcs := make([]uintptr, 1) + runtime.Callers(2, fpcs) + fun := runtime.FuncForPC(fpcs[0]) + return fun.Name() +} + +// Returns a string containing up to 10 function names +// from the call stack +func FuncNameCallStack() string { + // Gets the list of function names from the call stack + fpcs := make([]uintptr, 10) + runtime.Callers(0, fpcs) + // Removes the function names from the reflect stack itself and the ones from the API management + removeReflect := regexp.MustCompile(`^ runtime.call|reflect.Value|\bNewRequest\b|NewRequestWitNotEncodedParamsWithApiVersion|NewRequestWitNotEncodedParams|ExecuteRequest|ExecuteRequestWithoutResponse|ExecuteTaskRequest`) + var stackStr []string + // Gets up to 10 functions from the stack + for N := 0; N < len(fpcs) && N < 10; N++ { + fun := runtime.FuncForPC(fpcs[N]) + funcName := path.Base(fun.Name()) + if !removeReflect.MatchString(funcName) { + stackStr = append(stackStr, funcName) + } + } + // Reverses the function names stack, to make it easier to read + var inverseStackStr []string + for N := len(stackStr) - 1; N > 1; N-- { + if stackStr[N] != "" && stackStr[N] != "." { + inverseStackStr = append(inverseStackStr, stackStr[N]) + } + } + return strings.Join(inverseStackStr, "-->") +} diff --git a/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/util/tar.go b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/util/tar.go new file mode 100644 index 000000000..88460b9b4 --- /dev/null +++ b/scripts/token-log-collector/vendor/github.com/vmware/go-vcloud-director/v2/util/tar.go @@ -0,0 +1,160 @@ +/* + * Copyright 2018 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. + */ + +package util + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strings" +) + +const TmpDirPrefix = "govcd" + +// Extract files to system tmp dir with name govcd+random number. Created folder with files isn't deleted. +// Returns extracted files paths in array and path where folder with files created. 
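+// Illustrative usage, not part of the upstream sources, assuming the logger
+// has been initialised via SetLog() and "vapp_template.ova" is a placeholder
+// path to an OVA archive:
+//
+//	filePaths, tmpDir, err := Unpack("vapp_template.ova")
+//	if err != nil {
+//		// handle the error
+//	}
+//	// filePaths lists the extracted files; tmpDir is the "govcd"-prefixed
+//	// temporary directory, which the caller is responsible for removing.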
+func Unpack(tarFile string) ([]string, string, error) { + + var filePaths []string + var dst string + + reader, err := os.Open(tarFile) + if err != nil { + return filePaths, dst, err + } + defer reader.Close() + + tarReader := tar.NewReader(reader) + + dst, err = ioutil.TempDir("", TmpDirPrefix) + if err != nil { + return filePaths, dst, err + } + + var expectedFileSize int64 = -1 + + for { + header, err := tarReader.Next() + + switch { + + // if no more files are found return + case err == io.EOF: + return filePaths, dst, nil + + // return any other error + case err != nil: + return filePaths, dst, err + + // if the header is nil, just skip it (not sure how this happens) + case header == nil: + continue + + case header != nil: + expectedFileSize = header.Size + } + + // the target location where the dir/newFile should be created + target := filepath.Join(dst, sanitizedName(header.Name)) + Logger.Printf("[TRACE] extracting newFile: %s \n", target) + + // check the newFile type + switch header.Typeflag { + + // if its a dir and it doesn't exist create it + case tar.TypeDir: + if _, err := os.Stat(target); err != nil { + if err := os.MkdirAll(target, 0755); err != nil { + return filePaths, dst, err + } + } + + case tar.TypeSymlink: + if header.Linkname != "" { + err := os.Symlink(header.Linkname, target) + if err != nil { + return filePaths, dst, err + } + } else { + return filePaths, dst, errors.New("file %s is a symlink, but no link information was provided") + } + + // if it's a newFile create it + case tar.TypeReg: + newFile, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return filePaths, dst, err + } + + // copy over contents + if _, err := io.Copy(newFile, tarReader); err != nil { + return filePaths, dst, err + } + + filePaths = append(filePaths, newFile.Name()) + + if err := isExtractedFileValid(newFile, expectedFileSize); err != nil { + errClose := newFile.Close() + if errClose != nil { + Logger.Printf("[DEBUG - Unpack] error closing newFile: %s", errClose) + } + return filePaths, dst, err + } + + // manually close here after each newFile operation; deferring would cause each newFile close + // to wait until all operations have completed. + errClose := newFile.Close() + if errClose != nil { + Logger.Printf("[DEBUG - Unpack] error closing newFile: %s", errClose) + } + } + } +} + +func isExtractedFileValid(file *os.File, expectedFileSize int64) error { + if fInfo, err := file.Stat(); err == nil { + Logger.Printf("[TRACE] isExtractedFileValid: created file size %#v, size from header %#v.\n", fInfo.Size(), expectedFileSize) + if fInfo.Size() != expectedFileSize && expectedFileSize != -1 { + return errors.New("extracted file didn't match defined file size") + } + } + return nil +} + +func sanitizedName(filename string) string { + if len(filename) > 1 && filename[1] == ':' { + filename = filename[2:] + } + filename = strings.TrimLeft(filename, "\\/.") + filename = strings.TrimLeft(filename, "./") + filename = strings.Replace(filename, "../../", "../", -1) + return strings.Replace(filename, "..\\", "", -1) +} + +// GetFileContentType returns the real file type +func GetFileContentType(file string) (string, error) { // Open File + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + // Only the first 512 bytes are used to sniff the content type. + buffer := make([]byte, 512) + + _, err = f.Read(buffer) + if err != nil { + return "", err + } + + // Use the net/http package's handy DectectContentType function. 
Always returns a valid + // content-type by returning "application/octet-stream" if no others seemed to match. + contentType := http.DetectContentType(buffer) + + return contentType, nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/.codecov.yml b/scripts/token-log-collector/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 000000000..571116cc3 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,19 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + +# Also update COVER_IGNORE_PKGS in the Makefile. +ignore: + - /internal/gen-atomicint/ + - /internal/gen-valuewrapper/ diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/.gitignore b/scripts/token-log-collector/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 000000000..c3fa25389 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,12 @@ +/bin +.DS_Store +/vendor +cover.html +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/.travis.yml b/scripts/token-log-collector/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 000000000..13d0a4f25 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,27 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +env: + global: + - GO111MODULE=on + +matrix: + include: + - go: oldstable + - go: stable + env: LINT=1 + +cache: + directories: + - vendor + +before_install: + - go version + +script: + - test -z "$LINT" || make lint + - make cover + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/CHANGELOG.md b/scripts/token-log-collector/vendor/go.uber.org/atomic/CHANGELOG.md new file mode 100644 index 000000000..24c0274dc --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/CHANGELOG.md @@ -0,0 +1,76 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.7.0] - 2020-09-14 +### Added +- Support JSON serialization and deserialization of primitive atomic types. +- Support Text marshalling and unmarshalling for string atomics. + +### Changed +- Disallow incorrect comparison of atomic values in a non-atomic way. + +### Removed +- Remove dependency on `golang.org/x/{lint, tools}`. + +## [1.6.0] - 2020-02-24 +### Changed +- Drop library dependency on `golang.org/x/{lint, tools}`. + +## [1.5.1] - 2019-11-19 +- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together + causing `CAS` to fail even though the old value matches. 
+ +## [1.5.0] - 2019-10-29 +### Changed +- With Go modules, only the `go.uber.org/atomic` import path is supported now. + If you need to use the old import path, please add a `replace` directive to + your `go.mod`. + +## [1.4.0] - 2019-05-01 +### Added + - Add `atomic.Error` type for atomic operations on `error` values. + +## [1.3.2] - 2018-05-02 +### Added +- Add `atomic.Duration` type for atomic operations on `time.Duration` values. + +## [1.3.1] - 2017-11-14 +### Fixed +- Revert optimization for `atomic.String.Store("")` which caused data races. + +## [1.3.0] - 2017-11-13 +### Added +- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. + +### Changed +- Optimize `atomic.String.Store("")` by avoiding an allocation. + +## [1.2.0] - 2017-04-12 +### Added +- Shadow `atomic.Value` from `sync/atomic`. + +## [1.1.0] - 2017-03-10 +### Added +- Add atomic `Float64` type. + +### Changed +- Support new `go.uber.org/atomic` import path. + +## [1.0.0] - 2016-07-18 + +- Initial release. + +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/LICENSE.txt b/scripts/token-log-collector/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 000000000..8765c9fbc --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/Makefile b/scripts/token-log-collector/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 000000000..1b1376d42 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,78 @@ +# Directory to place `go install`ed binaries into. 
+export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +GEN_ATOMICINT = $(GOBIN)/gen-atomicint +GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper +STATICCHECK = $(GOBIN)/staticcheck + +GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) + +# Also update ignore section in .codecov.yml. +COVER_IGNORE_PKGS = \ + go.uber.org/atomic/internal/gen-atomicint \ + go.uber.org/atomic/internal/gen-atomicwrapper + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) + go build -o $@ ./internal/gen-atomicwrapper + +$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) + go build -o $@ ./internal/gen-atomicint + +.PHONY: golint +golint: $(GOLINT) + $(GOLINT) ./... + +.PHONY: staticcheck +staticcheck: $(STATICCHECK) + $(STATICCHECK) ./... + +.PHONY: lint +lint: gofmt golint staticcheck generatenodirty + +# comma separated list of packages to consider for code coverage. +COVER_PKG = $(shell \ + go list -find ./... | \ + grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ + paste -sd, -) + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: generate +generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) + go generate ./... + +.PHONY: generatenodirty +generatenodirty: + @[ -z "$$(git status --porcelain)" ] || ( \ + echo "Working tree is dirty. Commit your changes first."; \ + exit 1 ) + @make generate + @status=$$(git status --porcelain); \ + [ -z "$$status" ] || ( \ + echo "Working tree is dirty after `make generate`:"; \ + echo "$$status"; \ + echo "Please ensure that the generated code is up-to-date." ) diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/README.md b/scripts/token-log-collector/vendor/go.uber.org/atomic/README.md new file mode 100644 index 000000000..ade0c20f1 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,63 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation + +```shell +$ go get -u go.uber.org/atomic@v1 +``` + +### Legacy Import Path + +As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way +of using this package. If you are using Go modules, this package will fail to +compile with the legacy import path path `github.com/uber-go/atomic`. + +We recommend migrating your code to the new import path but if you're unable +to do so, or if your dependencies are still using the old import path, you +will have to add a `replace` directive to your `go.mod` file downgrading the +legacy import path to an older version. + +``` +replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 +``` + +You can do so automatically by running the following command. 
+ +```shell +$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 +``` + +## Usage + +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status + +Stable. + +--- + +Released under the [MIT License](LICENSE.txt). + +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/bool.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/bool.go new file mode 100644 index 000000000..9cf1914b1 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/bool.go @@ -0,0 +1,81 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. +type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(v bool) *Bool { + x := &Bool{} + if v != _zeroBool { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped bool. +func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(v bool) { + x.v.Store(boolToInt(v)) +} + +// CAS is an atomic compare-and-swap for bool values. +func (x *Bool) CAS(o, n bool) bool { + return x.v.CAS(boolToInt(o), boolToInt(n)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(o bool) bool { + return truthy(x.v.Swap(boolToInt(o))) +} + +// MarshalJSON encodes the wrapped bool into JSON. 
+func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. +func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/bool_ext.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/bool_ext.go new file mode 100644 index 000000000..c7bf7a827 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/doc.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/doc.go new file mode 100644 index 000000000..ae7390ee6 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/duration.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/duration.go new file mode 100644 index 000000000..027cfcb20 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/duration.go @@ -0,0 +1,82 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(v time.Duration) *Duration { + x := &Duration{} + if v != _zeroDuration { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(v time.Duration) { + x.v.Store(int64(v)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CAS(o, n time.Duration) bool { + return x.v.CAS(int64(o), int64(n)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. +func (x *Duration) Swap(o time.Duration) time.Duration { + return time.Duration(x.v.Swap(int64(o))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. 
+func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/duration_ext.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/duration_ext.go new file mode 100644 index 000000000..6273b66bd --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// String encodes the wrapped value as a string. +func (d *Duration) String() string { + return d.Load().String() +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/error.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/error.go new file mode 100644 index 000000000..a6166fbea --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,51 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroError error + +// NewError creates a new Error. +func NewError(v error) *Error { + x := &Error{} + if v != _zeroError { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) +} + +// Store atomically stores the passed error. +func (x *Error) Store(v error) { + x.v.Store(packError(v)) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/error_ext.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/error_ext.go new file mode 100644 index 000000000..ffe0be21c --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/float64.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/float64.go new file mode 100644 index 000000000..071906020 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/float64.go @@ -0,0 +1,76 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(v float64) *Float64 { + x := &Float64{} + if v != _zeroFloat64 { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped float64. +func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(v float64) { + x.v.Store(math.Float64bits(v)) +} + +// CAS is an atomic compare-and-swap for float64 values. +func (x *Float64) CAS(o, n float64) bool { + return x.v.CAS(math.Float64bits(o), math.Float64bits(n)) +} + +// MarshalJSON encodes the wrapped float64 into JSON. +func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/float64_ext.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/float64_ext.go new file mode 100644 index 000000000..927b1add7 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/float64_ext.go @@ -0,0 +1,47 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "strconv" + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/gen.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/gen.go new file mode 100644 index 000000000..50d6b2485 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/gen.go @@ -0,0 +1,26 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/int32.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/int32.go new file mode 100644 index 000000000..18ae56493 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/int32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(i int32) *Int32 { + return &Int32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// MarshalJSON encodes the wrapped int32 into JSON. +func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/int64.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/int64.go new file mode 100644 index 000000000..2bcbbfaa9 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/int64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(i int64) *Int64 { + return &Int64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// MarshalJSON encodes the wrapped int64 into JSON. +func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. +func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/nocmp.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/nocmp.go new file mode 100644 index 000000000..a8201cb4a --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/string.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/string.go new file mode 100644 index 000000000..225b7a2be --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,54 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper for string values. +type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string + +// NewString creates a new String. +func NewString(v string) *String { + x := &String{} + if v != _zeroString { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped string. +func (x *String) Load() string { + if v := x.v.Load(); v != nil { + return v.(string) + } + return _zeroString +} + +// Store atomically stores the passed string. 
+func (x *String) Store(v string) { + x.v.Store(v) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/string_ext.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/string_ext.go new file mode 100644 index 000000000..3a9558213 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/string_ext.go @@ -0,0 +1,43 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. +// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/uint32.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/uint32.go new file mode 100644 index 000000000..a973aba1a --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/uint32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. +func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/uint64.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/uint64.go new file mode 100644 index 000000000..3b6c71fd5 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/uint64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. +func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/atomic/value.go b/scripts/token-log-collector/vendor/go.uber.org/atomic/value.go new file mode 100644 index 000000000..671f3a382 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + atomic.Value + + _ nocmp // disallow non-atomic comparison +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/.codecov.yml b/scripts/token-log-collector/vendor/go.uber.org/multierr/.codecov.yml new file mode 100644 index 000000000..6d4d1be7b --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/.gitignore b/scripts/token-log-collector/vendor/go.uber.org/multierr/.gitignore new file mode 100644 index 000000000..b9a05e3da --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/.gitignore @@ -0,0 +1,4 @@ +/vendor +cover.html +cover.out +/bin diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/.travis.yml b/scripts/token-log-collector/vendor/go.uber.org/multierr/.travis.yml new file mode 100644 index 000000000..8636ab42a --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/.travis.yml @@ -0,0 +1,23 @@ +sudo: false +language: go +go_import_path: go.uber.org/multierr + +env: + global: + - GO111MODULE=on + +go: + - oldstable + - stable + +before_install: +- go version + +script: +- | + set -e + make lint + make cover + +after_success: +- bash <(curl -s https://codecov.io/bash) diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/CHANGELOG.md b/scripts/token-log-collector/vendor/go.uber.org/multierr/CHANGELOG.md new file mode 100644 index 000000000..6f1db9ef4 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/CHANGELOG.md @@ -0,0 +1,60 @@ +Releases +======== + +v1.6.0 (2020-09-14) +=================== + +- Actually drop library dependency on development-time tooling. + + +v1.5.0 (2020-02-24) +=================== + +- Drop library dependency on development-time tooling. + + +v1.4.0 (2019-11-04) +=================== + +- Add `AppendInto` function to more ergonomically build errors inside a + loop. + + +v1.3.0 (2019-10-29) +=================== + +- Switch to Go modules. + + +v1.2.0 (2019-09-26) +=================== + +- Support extracting and matching against wrapped errors with `errors.As` + and `errors.Is`. + + +v1.1.0 (2017-06-30) +=================== + +- Added an `Errors(error) []error` function to extract the underlying list of + errors for a multierr error. + + +v1.0.0 (2017-05-31) +=================== + +No changes since v0.2.0. 
This release is committing to making no breaking +changes to the current API in the 1.X series. + + +v0.2.0 (2017-04-11) +=================== + +- Repeatedly appending to the same error is now faster due to fewer + allocations. + + +v0.1.0 (2017-31-03) +=================== + +- Initial release diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/LICENSE.txt b/scripts/token-log-collector/vendor/go.uber.org/multierr/LICENSE.txt new file mode 100644 index 000000000..858e02475 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/Makefile b/scripts/token-log-collector/vendor/go.uber.org/multierr/Makefile new file mode 100644 index 000000000..316004400 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/Makefile @@ -0,0 +1,42 @@ +# Directory to put `go install`ed binaries in. +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) + +.PHONY: golint +golint: + @cd tools && go install golang.org/x/lint/golint + @$(GOBIN)/golint ./... + +.PHONY: staticcheck +staticcheck: + @cd tools && go install honnef.co/go/tools/cmd/staticcheck + @$(GOBIN)/staticcheck ./... + +.PHONY: lint +lint: gofmt golint staticcheck + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg=./... -v ./... + go tool cover -html=cover.out -o cover.html + +update-license: + @cd tools && go install go.uber.org/tools/update-license + @$(GOBIN)/update-license $(GO_FILES) diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/README.md b/scripts/token-log-collector/vendor/go.uber.org/multierr/README.md new file mode 100644 index 000000000..751bd65e5 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/README.md @@ -0,0 +1,23 @@ +# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +`multierr` allows combining one or more Go `error`s together. 
+ +## Installation + + go get -u go.uber.org/multierr + +## Status + +Stable: No breaking changes will be made before 2.0. + +------------------------------------------------------------------------------- + +Released under the [MIT License]. + +[MIT License]: LICENSE.txt +[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg +[doc]: https://godoc.org/go.uber.org/multierr +[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master +[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg +[ci]: https://travis-ci.com/uber-go/multierr +[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/error.go b/scripts/token-log-collector/vendor/go.uber.org/multierr/error.go new file mode 100644 index 000000000..5c9b67d53 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,449 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// Overview +// +// Errors can be combined with the use of the Combine function. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// This makes it possible to record resource cleanup failures from deferred +// blocks with the help of named return values. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// The underlying list of errors for a returned error object may be retrieved +// with the Errors function. +// +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } +// +// Advanced Usage +// +// Errors returned by Combine and Append MAY implement the following +// interface. +// +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } +// +// Note that if you need access to list of errors behind a multierr error, you +// should prefer using the Errors function. 
That said, if you need cheap +// read-only access to the underlying errors slice, you can attempt to cast +// the error to this interface. You MUST handle the failure case gracefully +// because errors returned by Combine and Append are not guaranteed to +// implement this interface. +// +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } +package multierr // import "go.uber.org/multierr" + +import ( + "bytes" + "fmt" + "io" + "strings" + "sync" + + "go.uber.org/atomic" +) + +var ( + // Separator for single-line error messages. + _singlelineSeparator = []byte("; ") + + // Prefix for multi-line messages + _multilinePrefix = []byte("the following errors occurred:") + + // Prefix for the first and following lines of an item in a list of + // multi-line error messages. + // + // For example, if a single item is: + // + // foo + // bar + // + // It will become, + // + // - foo + // bar + _multilineSeparator = []byte("\n - ") + _multilineIndent = []byte(" ") +) + +// _bufferPool is a pool of bytes.Buffers. +var _bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors that the supplied +// error is composed of. If the error is nil, a nil slice is returned. +// +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) +// +// If the error is not composed of other errors, the returned slice contains +// just the error that was passed in. +// +// Callers of this function are free to modify the returned slice. +func Errors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + errors := eg.Errors() + result := make([]error, len(errors)) + copy(result, errors) + return result +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +var _ errorGroup = (*multiError)(nil) + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. 
+func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. + FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // already flat + return &multiError{errors: errors} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. 
+// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. + errors := [2]error{left, right} + return fromSlice(errors[0:]) +} + +// AppendInto appends an error into the destination of an error pointer and +// returns whether the error being appended was non-nil. +// +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) +// +// The above is equivalent to, +// +// err := multierr.Append(r.Close(), w.Close()) +// +// As AppendInto reports whether the provided error was non-nil, it may be +// used to build a multierr error in a loop more ergonomically. For example: +// +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a verison that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } +func AppendInto(into *error, err error) (errored bool) { + if into == nil { + // We panic if 'into' is nil. This is not documented above + // because suggesting that the pointer must be non-nil may + // confuse users into thinking that the error that it points + // to must be non-nil. 
+ panic("misuse of multierr.AppendInto: into pointer must not be nil") + } + + if err == nil { + return false + } + *into = Append(*into, err) + return true +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/glide.yaml b/scripts/token-log-collector/vendor/go.uber.org/multierr/glide.yaml new file mode 100644 index 000000000..6ef084ec2 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/glide.yaml @@ -0,0 +1,8 @@ +package: go.uber.org/multierr +import: +- package: go.uber.org/atomic + version: ^1 +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/scripts/token-log-collector/vendor/go.uber.org/multierr/go113.go b/scripts/token-log-collector/vendor/go.uber.org/multierr/go113.go new file mode 100644 index 000000000..264b0eac0 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/multierr/go113.go @@ -0,0 +1,52 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build go1.13 + +package multierr + +import "errors" + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. 
+func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/.codecov.yml b/scripts/token-log-collector/vendor/go.uber.org/zap/.codecov.yml new file mode 100644 index 000000000..8e5ca7d3e --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/.codecov.yml @@ -0,0 +1,17 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 95% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure +ignore: + - internal/readme/readme.go + diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/.gitignore b/scripts/token-log-collector/vendor/go.uber.org/zap/.gitignore new file mode 100644 index 000000000..da9d9d00b --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log + +/bin +cover.out +cover.html diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/.readme.tmpl b/scripts/token-log-collector/vendor/go.uber.org/zap/.readme.tmpl new file mode 100644 index 000000000..92aa65d66 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/.readme.tmpl @@ -0,0 +1,109 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. 
Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +{{.BenchmarkAddingFields}} + +Log a message with a logger that already has 10 fields of context: + +{{.BenchmarkAccumulatedContext}} + +Log a static string, without any context or `printf`-style templating: + +{{.BenchmarkWithoutFields}} + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/CHANGELOG.md b/scripts/token-log-collector/vendor/go.uber.org/zap/CHANGELOG.md new file mode 100644 index 000000000..1793b08c8 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/CHANGELOG.md @@ -0,0 +1,564 @@ +# Changelog +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## 1.21.0 (7 Feb 2022) + +Enhancements: +* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string. +* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a + string. + +Bugfixes: +* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset. + +Other changes: +* [#1052][]: Improve encoding performance when the `AddCaller` and + `AddStacktrace` options are used together. + +[#1047]: https://github.com/uber-go/zap/pull/1047 +[#1048]: https://github.com/uber-go/zap/pull/1048 +[#1052]: https://github.com/uber-go/zap/pull/1052 +[#1058]: https://github.com/uber-go/zap/pull/1058 + +Thanks to @aerosol and @Techassi for their contributions to this release. + +## 1.20.0 (4 Jan 2022) + +Enhancements: +* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline + characters between log statements. +* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON + encoding of reflected log fields. + +Bugfixes: +* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON. +* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject` + methods when the methods return. +* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero. + +Other changes: +* [#1028][]: Drop support for Go < 1.15. + +[#554]: https://github.com/uber-go/zap/pull/554 +[#989]: https://github.com/uber-go/zap/pull/989 +[#1011]: https://github.com/uber-go/zap/pull/1011 +[#1017]: https://github.com/uber-go/zap/pull/1017 +[#1028]: https://github.com/uber-go/zap/pull/1028 +[#1033]: https://github.com/uber-go/zap/pull/1033 +[#1039]: https://github.com/uber-go/zap/pull/1039 + +Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release. + +## 1.19.1 (8 Sep 2021) + +Bugfixes: +* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. +* [#1003][]: JSON: Fix inaccurate precision when encoding float32. + +[#1001]: https://github.com/uber-go/zap/pull/1001 +[#1003]: https://github.com/uber-go/zap/pull/1003 + +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. 
+ +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. + +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. +* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. +* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. + +[#691]: https://github.com/uber-go/zap/pull/691 +[#897]: https://github.com/uber-go/zap/pull/897 +[#943]: https://github.com/uber-go/zap/pull/943 +[#949]: https://github.com/uber-go/zap/pull/949 +[#961]: https://github.com/uber-go/zap/pull/961 +[#971]: https://github.com/uber-go/zap/pull/971 + +## 1.17.0 (25 May 2021) + +Bugfixes: +* [#867][]: Encode `` for nil `error` instead of a panic. +* [#931][], [#936][]: Update minimum version constraints to address + vulnerabilities in dependencies. + +Enhancements: +* [#865][]: Improve alignment of fields of the Logger struct, reducing its + size from 96 to 80 bytes. +* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. +* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler + with the `application/x-www-form-urlencoded` content type. +* [#912][]: Support multi-field encoding with `zap.Inline`. +* [#913][]: Speed up SugaredLogger for calls with a single string. +* [#928][]: Add support for filtering by field name to `zaptest/observer`. + +Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. + +## 1.16.0 (1 Sep 2020) + +Bugfixes: +* [#828][]: Fix missing newline in IncreaseLevel error messages. +* [#835][]: Fix panic in JSON encoder when encoding times or durations + without specifying a time or duration encoder. +* [#843][]: Honor CallerSkip when taking stack traces. +* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. +* [#854][]: Encode `` for nil `Stringer` instead of a panic error log. + +Enhancements: +* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders + for custom layouts. +* [#697][]: Added support for a configurable delimiter in the console encoder. +* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. +* [#844][]: Add ability to include the calling function as part of logs. +* [#843][]: Add `StackSkip` for including truncated stacks as a field. +* [#861][]: Add options to customize Fatal behaviour for better testability. + +Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. + +## 1.15.0 (23 Apr 2020) + +Bugfixes: +* [#804][]: Fix handling of `Time` values out of `UnixNano` range. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. 
+ +Enhancements: +* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This + allows disabling annotation of log entries with caller information if + previously enabled with `AddCaller`. +* [#813][]: Deprecate `NewSampler` constructor in favor of + `NewSamplerWithOptions` which supports a `SamplerHook` option. This option + adds support for monitoring sampling decisions through a hook. + +Thanks to @danielbprice for their contributions to this release. + +## 1.14.1 (14 Mar 2020) + +Bugfixes: +* [#791][]: Fix panic on attempting to build a logger with an invalid Config. +* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's + development-time dependencies. +* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to + be generated for arrays of `time.Time` objects when using string-based time + formats. + +Thanks to @YashishDua for their contributions to this release. + +## 1.14.0 (20 Feb 2020) + +Enhancements: +* [#771][]: Optimize calls for disabled log levels. +* [#773][]: Add millisecond duration encoder. +* [#775][]: Add option to increase the level of a logger. +* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. + +Thanks to @caibirdme for their contributions to this release. + +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +## v1.9.1 (06 Aug 2018) + +Bugfixes: + +* [#614][]: MapObjectEncoder should not ignore empty slices. + +## v1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and +@dimroc for their contributions to this release. + +## v1.8.0 (13 Apr 2018) + +Enhancements: +* [#508][]: Make log level configurable when redirecting the standard + library's logger. +* [#518][]: Add a logger that writes to a `*testing.TB`. +* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. + +Bugfixes: +* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. + +Thanks to @DiSiqueira and @djui for their contributions to this release. + +## v1.7.1 (25 Sep 2017) + +Bugfixes: +* [#504][]: Store strings when using AddByteString with the map encoder. + +## v1.7.0 (21 Sep 2017) + +Enhancements: + +* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user + to specify the level of the logged messages. 
+ +## v1.6.0 (30 Aug 2017) + +Enhancements: + +* [#491][]: Omit zap stack frames from stacktraces. +* [#490][]: Add a `ContextMap` method to observer logs for simpler + field validation in tests. + +## v1.5.0 (22 Jul 2017) + +Enhancements: + +* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. +* [#465][]: Support user-supplied encoders for logger names. + +Bugfixes: + +* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. + +Thanks to @richard-tunein and @pavius for their contributions to this release. + +## v1.4.1 (08 Jun 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. +* [#444][]: Fix a panic in the observer. + +## v1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. +* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +## v1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +## v1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +## v1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. + +## v1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. + +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. +* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. +* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). 
+* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. +* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). +* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +## v1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. +* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. +* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. + +## v1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. + +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. +* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. + +## v1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. +* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. 
+* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## v0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. + +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. + +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 +[#402]: https://github.com/uber-go/zap/pull/402 +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 +[#487]: https://github.com/uber-go/zap/pull/487 +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 +[#504]: https://github.com/uber-go/zap/pull/504 +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 +[#614]: https://github.com/uber-go/zap/pull/614 +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 +[#751]: https://github.com/uber-go/zap/pull/751 +[#758]: 
https://github.com/uber-go/zap/pull/758 +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/scripts/token-log-collector/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..e327d9aa5 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/CONTRIBUTING.md b/scripts/token-log-collector/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 000000000..5cd965687 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,75 @@ +# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. 
+ +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. +* Maintain backward compatibility. + +[fork]: https://github.com/uber-go/zap/fork +[open-issue]: https://github.com/uber-go/zap/issues/new +[cla]: https://cla-assistant.io/uber-go/zap +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/FAQ.md b/scripts/token-log-collector/vendor/go.uber.org/zap/FAQ.md new file mode 100644 index 000000000..b183b20bc --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/FAQ.md @@ -0,0 +1,164 @@ +# Frequently Asked Questions + +## Design + +### Why spend so much effort on logger performance? + +Of course, most applications won't notice the impact of a slow logger: they +already take tens or hundreds of milliseconds for each operation, so an extra +millisecond doesn't matter. + +On the other hand, why *not* make structured logging fast? The `SugaredLogger` +isn't any harder to use than other logging packages, and the `Logger` makes +structured logging possible in performance-sensitive contexts. Across a fleet +of Go microservices, making each application even slightly more efficient adds +up quickly. + +### Why aren't `Logger` and `SugaredLogger` interfaces? + +Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and +`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points +out][go-proverbs], "The bigger the interface, the weaker the abstraction." +Interfaces are also rigid — *any* change requires releasing a new major +version, since it breaks all third-party implementations. + +Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much +abstraction, and it lets us add methods without introducing breaking changes. +Your applications should define and depend upon an interface that includes +just the methods you use. + +### Why are some of my logs missing? + +Logs are dropped intentionally by zap when sampling is enabled. The production +configuration (as returned by `NewProductionConfig()` enables sampling which will +cause repeated logs within a second to be sampled. See more details on why sampling +is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs). + +### Why sample application logs? + +Applications often experience runs of errors, either because of a bug or +because of a misbehaving user. Logging errors is usually a good idea, but it +can easily make this bad situation worse: not only is your application coping +with a flood of errors, it's also spending extra CPU cycles and I/O logging +those errors. 
Since writes are typically serialized, logging limits throughput +when you need it most. + +Sampling fixes this problem by dropping repetitive log entries. Under normal +conditions, your application writes out every entry. When similar entries are +logged hundreds or thousands of times each second, though, zap begins dropping +duplicates to preserve throughput. + +### Why do the structured logging APIs take a message in addition to fields? + +Subjectively, we find it helpful to accompany structured context with a brief +description. This isn't critical during development, but it makes debugging +and operating unfamiliar systems much easier. + +More concretely, zap's sampling algorithm uses the message to identify +duplicate entries. In our experience, this is a practical middle ground +between random sampling (which often drops the exact entry that you need while +debugging) and hashing the complete entry (which is prohibitively expensive). + +### Why include package-global loggers? + +Since so many other logging packages include a global logger, many +applications aren't designed to accept loggers as explicit parameters. +Changing function signatures is often a breaking change, so zap includes +global loggers to simplify migration. + +Avoid them where possible. + +### Why include dedicated Panic and Fatal log levels? + +In general, application code should handle errors gracefully instead of using +`panic` or `os.Exit`. However, every rule has exceptions, and it's common to +crash when an error is truly unrecoverable. To avoid losing any information +— especially the reason for the crash — the logger must flush any +buffered entries before the process exits. + +Zap makes this easy by offering `Panic` and `Fatal` logging methods that +automatically flush before exiting. Of course, this doesn't guarantee that +logs will never be lost, but it eliminates a common error. + +See the discussion in uber-go/zap#207 for more details. + +### What's `DPanic`? + +`DPanic` stands for "panic in development." In development, it logs at +`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to +catch errors that are theoretically possible, but shouldn't actually happen, +*without* crashing in production. + +If you've ever written code like this, you need `DPanic`: + +```go +if err != nil { + panic(fmt.Sprintf("shouldn't ever get here: %v", err)) +} +``` + +## Installation + +### What does the error `expects import "go.uber.org/zap"` mean? + +Either zap was installed incorrectly or you're referencing the wrong package +name in your code. + +Zap's source code happens to be hosted on GitHub, but the [import +path][import-path] is `go.uber.org/zap`. This gives us, the project +maintainers, the freedom to move the source code if necessary. However, it +means that you need to take a little care when installing and using the +package. + +If you follow two simple rules, everything should work: install zap with `go +get -u go.uber.org/zap`, and always import it in your code with `import +"go.uber.org/zap"`. Your code shouldn't contain *any* references to +`github.com/uber-go/zap`. + +## Usage + +### Does zap support log rotation? + +Zap doesn't natively support rotating log files, since we prefer to leave this +to an external program like `logrotate`. + +However, it's easy to integrate a log rotation package like +[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. + +```go +// lumberjack.Logger is already safe for concurrent use, so we don't need to +// lock it. 
+w := zapcore.AddSync(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days +}) +core := zapcore.NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + w, + zap.InfoLevel, +) +logger := zap.New(core) +``` + +## Extensions + +We'd love to support every logging need within zap itself, but we're only +familiar with a handful of log ingestion systems, flag-parsing packages, and +the like. Rather than merging code that we can't effectively debug and +support, we'd rather grow an ecosystem of zap extensions. + +We're aware of the following extensions, but haven't used them ourselves: + +| Package | Integration | +| --- | --- | +| `github.com/tchap/zapext` | Sentry, syslog | +| `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | + +[go-proverbs]: https://go-proverbs.github.io/ +[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths +[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/LICENSE.txt b/scripts/token-log-collector/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 000000000..6652bed45 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/Makefile b/scripts/token-log-collector/vendor/go.uber.org/zap/Makefile new file mode 100644 index 000000000..9b1bc3b0e --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,73 @@ +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem + +# Directories containing independent Go modules. +# +# We track coverage only for the main module. +MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test + +# Many Go tools take file globs or directories as arguments instead of packages. +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: all +all: lint test + +.PHONY: lint +lint: $(GOLINT) $(STATICCHECK) + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." 
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking lint..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking staticcheck..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e Makefile | tee -a lint.log + @echo "Checking for license headers..." + @./checklicense.sh | tee -a lint.log + @[ ! -s lint.log ] + @echo "Checking 'go mod tidy'..." + @make tidy + @if ! git diff --quiet; then \ + echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ + git --no-pager diff; \ + fi + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +.PHONY: test +test: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true + +.PHONY: cover +cover: + go test -race -coverprofile=cover.out -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/README.md b/scripts/token-log-collector/vendor/go.uber.org/zap/README.md new file mode 100644 index 000000000..9c9dfe1ed --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/README.md @@ -0,0 +1,134 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. 
It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 2900 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op +| zerolog | 10639 ns/op | +267% | 32 allocs/op +| go-kit | 14434 ns/op | +398% | 59 allocs/op +| logrus | 17104 ns/op | +490% | 81 allocs/op +| apex/log | 32424 ns/op | +1018% | 66 allocs/op +| log15 | 33579 ns/op | +1058% | 76 allocs/op + +Log a message with a logger that already has 10 fields of context: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 373 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op +| zerolog | 288 ns/op | -23% | 0 allocs/op +| go-kit | 11785 ns/op | +3060% | 58 allocs/op +| logrus | 19629 ns/op | +5162% | 70 allocs/op +| log15 | 21866 ns/op | +5762% | 72 allocs/op +| apex/log | 30890 ns/op | +8182% | 55 allocs/op + +Log a static string, without any context or `printf`-style templating: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 381 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op +| zerolog | 369 ns/op | -3% | 0 allocs/op +| standard library | 385 ns/op | +1% | 2 allocs/op +| go-kit | 606 ns/op | +59% | 11 allocs/op +| logrus | 1730 ns/op | +354% | 25 allocs/op +| apex/log | 1998 ns/op | +424% | 7 allocs/op +| log15 | 4546 ns/op | +1093% | 22 allocs/op + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/array.go b/scripts/token-log-collector/vendor/go.uber.org/zap/array.go new file mode 100644 index 000000000..5be3704a3 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/array.go @@ -0,0 +1,320 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. 
+func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. +func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) Field { + return Array(key, stringArray(ss)) +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. +func Uints(key string, nums []uint) Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. +func Uintptrs(key string, us []uintptr) Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. 
+func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git 
a/scripts/token-log-collector/vendor/go.uber.org/zap/buffer/buffer.go b/scripts/token-log-collector/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 000000000..9e929cd98 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,141 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer // import "go.uber.org/zap/buffer" + +import ( + "strconv" + "time" +) + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). +func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendTime appends the time formatted using the specified layout. +func (b *Buffer) AppendTime(t time.Time, layout string) { + b.bs = t.AppendFormat(b.bs, layout) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. +func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. 
+func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/buffer/pool.go b/scripts/token-log-collector/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 000000000..8fb3e202c --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import "sync" + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *sync.Pool +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{p: &sync.Pool{ + New: func() interface{} { + return &Buffer{bs: make([]byte, 0, _size)} + }, + }} +} + +// Get retrieves a Buffer from the pool, creating one if necessary. 
+func (p Pool) Get() *Buffer { + buf := p.p.Get().(*Buffer) + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/checklicense.sh b/scripts/token-log-collector/vendor/go.uber.org/zap/checklicense.sh new file mode 100644 index 000000000..345ac8b89 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/checklicense.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +ERROR_COUNT=0 +while read -r file +do + case "$(head -1 "${file}")" in + *"Copyright (c) "*" Uber Technologies, Inc.") + # everything's cool + ;; + *) + echo "$file is missing license header." + (( ERROR_COUNT++ )) + ;; + esac +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/config.go b/scripts/token-log-collector/vendor/go.uber.org/zap/config.go new file mode 100644 index 000000000..55637fb0b --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/config.go @@ -0,0 +1,264 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. 
+// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + FunctionKey: zapcore.OmitKey, + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig is a reasonable production logging configuration. +// Logging is enabled at InfoLevel and above. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. +// Stacktraces are automatically included on logs of ErrorLevel and above. 
+func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + FunctionKey: zapcore.OmitKey, + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig is a reasonable development logging configuration. +// Logging is enabled at DebugLevel and above. +// +// It enables development mode (which makes DPanicLevel logs panic), uses a +// console encoder, writes to standard error, and disables sampling. +// Stacktraces are automatically included on logs of WarnLevel and above. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. +func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + if cfg.Level == (AtomicLevel{}) { + return nil, fmt.Errorf("missing Level") + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if scfg := cfg.Sampling; scfg != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) 
+ if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/doc.go b/scripts/token-log-collector/vendor/go.uber.org/zap/doc.go new file mode 100644 index 000000000..8638dd1b9 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. +// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. 
+// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. +package zap // import "go.uber.org/zap" diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/encoder.go b/scripts/token-log-collector/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 000000000..08ed83354 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,79 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. +func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, fmt.Errorf("missing EncodeTime in EncoderConfig") + } + + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/error.go b/scripts/token-log-collector/vendor/go.uber.org/zap/error.go new file mode 100644 index 000000000..65982a51e --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/error.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "sync" + + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get().(*errArrayElem) + elem.error = errs[i] + arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/field.go b/scripts/token-log-collector/vendor/go.uber.org/zap/field.go new file mode 100644 index 000000000..bbb745db5 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/field.go @@ -0,0 +1,549 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/zapcore" +) + +// Field is an alias for Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. 
+type Field = zapcore.Field + +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that. +func nilField(key string) Field { return Reflect(key, nil) } + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). +func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int32p constructs a field that carries a *int32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + +// String constructs a field with the given key and value. +func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. 
+func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, takeStacktrace(skip+1)) // skip StackSkip +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. 
+func Any(key string, value interface{}) Field { + switch val := value.(type) { + case zapcore.ObjectMarshaler: + return Object(key, val) + case zapcore.ArrayMarshaler: + return Array(key, val) + case bool: + return Bool(key, val) + case *bool: + return Boolp(key, val) + case []bool: + return Bools(key, val) + case complex128: + return Complex128(key, val) + case *complex128: + return Complex128p(key, val) + case []complex128: + return Complex128s(key, val) + case complex64: + return Complex64(key, val) + case *complex64: + return Complex64p(key, val) + case []complex64: + return Complex64s(key, val) + case float64: + return Float64(key, val) + case *float64: + return Float64p(key, val) + case []float64: + return Float64s(key, val) + case float32: + return Float32(key, val) + case *float32: + return Float32p(key, val) + case []float32: + return Float32s(key, val) + case int: + return Int(key, val) + case *int: + return Intp(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case *int64: + return Int64p(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case *int32: + return Int32p(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case *int16: + return Int16p(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case *int8: + return Int8p(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case *string: + return Stringp(key, val) + case []string: + return Strings(key, val) + case uint: + return Uint(key, val) + case *uint: + return Uintp(key, val) + case []uint: + return Uints(key, val) + case uint64: + return Uint64(key, val) + case *uint64: + return Uint64p(key, val) + case []uint64: + return Uint64s(key, val) + case uint32: + return Uint32(key, val) + case *uint32: + return Uint32p(key, val) + case []uint32: + return Uint32s(key, val) + case uint16: + return Uint16(key, val) + case *uint16: + return Uint16p(key, val) + case []uint16: + return Uint16s(key, val) + case uint8: + return Uint8(key, val) + case *uint8: + return Uint8p(key, val) + case []byte: + return Binary(key, val) + case uintptr: + return Uintptr(key, val) + case *uintptr: + return Uintptrp(key, val) + case []uintptr: + return Uintptrs(key, val) + case time.Time: + return Time(key, val) + case *time.Time: + return Timep(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case *time.Duration: + return Durationp(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + case fmt.Stringer: + return Stringer(key, val) + default: + return Reflect(key, val) + } +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/flag.go b/scripts/token-log-collector/vendor/go.uber.org/zap/flag.go new file mode 100644 index 000000000..131287507 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/glide.yaml b/scripts/token-log-collector/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 000000000..8e1d05e9a --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,34 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +- package: go.uber.org/multierr + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: gopkg.in/inconshreveable/log15.v2 +- package: github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: github.com/rs/zerolog +- package: golang.org/x/tools + subpackages: + - cover +- package: golang.org/x/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/global.go b/scripts/token-log-collector/vendor/go.uber.org/zap/global.go new file mode 100644 index 000000000..3cb46c9e0 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/global.go @@ -0,0 +1,169 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _stdLogDefaultDepth = 1 + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns *log.Logger which writes to supplied zap logger at +// required level. +func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. 
+func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/http_handler.go b/scripts/token-log-collector/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 000000000..1297c33b3 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,132 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
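global.go also provides the bridge from the standard library's log package into zap. A hedged sketch of how a caller might wire that up (the logger construction and messages here are illustrative assumptions, not taken from this change):

```go
package main

import (
	"log"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	// Route the stdlib's package-global logger through zap at InfoLevel;
	// the returned func restores the previous flags, prefix, and output.
	undo := zap.RedirectStdLog(logger)
	defer undo()

	log.Println("emitted by zap, not the stdlib logger")

	// A *log.Logger pinned to a specific level, e.g. for http.Server.ErrorLog.
	errLog, err := zap.NewStdLogAt(logger, zap.ErrorLevel)
	if err != nil {
		panic(err)
	}
	errLog.Print("reported at error level")
}
```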
+ +package zap + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "go.uber.org/zap/zapcore" +) + +// ServeHTTP is a simple JSON endpoint that can report on or change the current +// logging level. +// +// GET +// +// The GET request returns a JSON description of the current logging level like: +// {"level":"info"} +// +// PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. Following are two +// example curl requests that both set the logging level to debug. +// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' +// +func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + type errorResponse struct { + Error string `json:"error"` + } + type payload struct { + Level zapcore.Level `json:"level"` + } + + enc := json.NewEncoder(w) + + switch r.Method { + case http.MethodGet: + enc.Encode(payload{Level: lvl.Level()}) + case http.MethodPut: + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + enc.Encode(errorResponse{Error: err.Error()}) + return + } + lvl.SetLevel(requestedLvl) + enc.Encode(payload{Level: lvl.Level()}) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + enc.Encode(errorResponse{ + Error: "Only GET and PUT are supported.", + }) + } +} + +// Decodes incoming PUT requests and returns the requested logging level. +func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, fmt.Errorf("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, fmt.Errorf("must specify logging level") + } + return *pld.Level, nil + +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/scripts/token-log-collector/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go new file mode 100644 index 000000000..dad583aaa --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
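The ServeHTTP method documented above means an AtomicLevel can be mounted directly as an HTTP handler, so the logging level can be raised or lowered while the process runs. A sketch under assumed names (the /log/level path and the :8080 address are illustrative):

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	atom := zap.NewAtomicLevelAt(zapcore.InfoLevel)

	cfg := zap.NewProductionConfig()
	cfg.Level = atom
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// GET returns {"level":"info"}; PUT with {"level":"debug"} takes effect
	// immediately for every logger built on top of atom.
	http.Handle("/log/level", atom)
	logger.Info("serving log level endpoint", zap.String("addr", ":8080"))
	_ = http.ListenAndServe(":8080", nil)
}
```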
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/internal/color/color.go b/scripts/token-log-collector/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 000000000..c4d5d02ab --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. 
+func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/internal/exit/exit.go b/scripts/token-log-collector/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 000000000..dfc5b05fe --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,64 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). +package exit + +import "os" + +var real = func() { os.Exit(1) } + +// Exit normally terminates the process by calling os.Exit(1). If the package +// is stubbed, it instead records a call in the testing spy. +func Exit() { + real() +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + prev func() +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: real} + real = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + real = se.prev +} + +func (se *StubbedExit) exit() { + se.Exited = true +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/level.go b/scripts/token-log-collector/vendor/go.uber.org/zap/level.go new file mode 100644 index 000000000..8f86c430f --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/level.go @@ -0,0 +1,149 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/atomic" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. +type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + return AtomicLevel{ + l: atomic.NewInt32(int32(InfoLevel)), + } +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. +func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseAtomicLevel(text string) (AtomicLevel, error) { + a := NewAtomicLevel() + l, err := zapcore.ParseLevel(text) + if err != nil { + return a, err + } + + a.SetLevel(l) + return a, nil +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. 
+func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. +func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. +func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/logger.go b/scripts/token-log-collector/vendor/go.uber.org/zap/logger.go new file mode 100644 index 000000000..087c74222 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,363 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + addCaller bool + onFatal zapcore.CheckWriteAction // default is WriteThenFatal + + name string + errorOutput zapcore.WriteSyncer + + addStack zapcore.LevelEnabler + + callerSkip int + + clock zapcore.Clock +} + +// New constructs a new Logger from the provided zapcore.Core and Options. 
If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(ioutil.Discard), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. 
Fields added
+// to the child don't affect the parent, and vice versa.
+func (log *Logger) With(fields ...Field) *Logger {
+	if len(fields) == 0 {
+		return log
+	}
+	l := log.clone()
+	l.core = l.core.With(fields)
+	return l
+}
+
+// Check returns a CheckedEntry if logging a message at the specified level
+// is enabled. It's a completely optional optimization; in high-performance
+// applications, Check can help avoid allocating a slice to hold fields.
+func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	return log.check(lvl, msg)
+}
+
+// Debug logs a message at DebugLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Debug(msg string, fields ...Field) {
+	if ce := log.check(DebugLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Info logs a message at InfoLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Info(msg string, fields ...Field) {
+	if ce := log.check(InfoLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Warn logs a message at WarnLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Warn(msg string, fields ...Field) {
+	if ce := log.check(WarnLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Error logs a message at ErrorLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Error(msg string, fields ...Field) {
+	if ce := log.check(ErrorLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// DPanic logs a message at DPanicLevel. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+//
+// If the logger is in development mode, it then panics (DPanic means
+// "development panic"). This is useful for catching errors that are
+// recoverable, but shouldn't ever happen.
+func (log *Logger) DPanic(msg string, fields ...Field) {
+	if ce := log.check(DPanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Panic logs a message at PanicLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then panics, even if logging at PanicLevel is disabled.
+func (log *Logger) Panic(msg string, fields ...Field) {
+	if ce := log.check(PanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+	if ce := log.check(FatalLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+	return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+	return log.core
+}
+
+func (log *Logger) clone() *Logger {
+	copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	// Logger.check must always be called directly by a method in the
+	// Logger interface (e.g., Check, Info, Fatal).
+ // This skips Logger.check and the Info/Fatal/Check/etc. method that + // called it. + const callerSkipOffset = 2 + + // Check the level first to reduce the cost of disabled log calls. + // Since Panic and higher may exit, we skip the optimization for those levels. + if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) { + return nil + } + + // Create basic checked entry thru the core; this will be non-nil if the + // log message will actually be written somewhere. + ent := zapcore.Entry{ + LoggerName: log.name, + Time: log.clock.Now(), + Level: lvl, + Message: msg, + } + ce := log.core.Check(ent, nil) + willWrite := ce != nil + + // Set up any required terminal behavior. + switch ent.Level { + case zapcore.PanicLevel: + ce = ce.Should(ent, zapcore.WriteThenPanic) + case zapcore.FatalLevel: + onFatal := log.onFatal + // Noop is the default value for CheckWriteAction, and it leads to + // continued execution after a Fatal which is unexpected. + if onFatal == zapcore.WriteThenNoop { + onFatal = zapcore.WriteThenFatal + } + ce = ce.Should(ent, onFatal) + case zapcore.DPanicLevel: + if log.development { + ce = ce.Should(ent, zapcore.WriteThenPanic) + } + } + + // Only do further annotation if we're going to write this message; checked + // entries that exist only for terminal behavior don't benefit from + // annotation. + if !willWrite { + return ce + } + + // Thread the error output through to the CheckedEntry. + ce.ErrorOutput = log.errorOutput + + addStack := log.addStack.Enabled(ce.Level) + if !log.addCaller && !addStack { + return ce + } + + // Adding the caller or stack trace requires capturing the callers of + // this function. We'll share information between these two. + stackDepth := stacktraceFirst + if addStack { + stackDepth = stacktraceFull + } + stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) + log.errorOutput.Sync() + } + return ce + } + + frame, more := stack.Next() + + if log.addCaller { + ce.Caller = zapcore.EntryCaller{ + Defined: frame.PC != 0, + PC: frame.PC, + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } + } + + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := newStackFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. + stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } + ce.Stack = buffer.String() + } + + return ce +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/options.go b/scripts/token-log-collector/vendor/go.uber.org/zap/options.go new file mode 100644 index 000000000..e9e66161f --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/options.go @@ -0,0 +1,148 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
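logger.go's Check path is what keeps the leveled methods cheap when a level is disabled. A minimal sketch of using it directly on a logger built from a custom core (the encoder, sink, logger name, and fields are assumptions for illustration):

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zap.InfoLevel,
	)
	logger := zap.New(core).Named("token-log-collector")

	// Check returns nil when DebugLevel is disabled, so the fields below are
	// never built on the hot path.
	if ce := logger.Check(zap.DebugLevel, "verbose details"); ce != nil {
		ce.Write(zap.Int("attempt", 1), zap.String("phase", "login"))
	}

	logger.Info("startup complete")
}
```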
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) + +// An Option configures a Logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. +type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. +func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// Hooks are useful for simple side effects, like capturing metrics for the +// number of emitted logs. More complex side effects, including anything that +// requires access to the Entry's structured fields, should be implemented as +// a zapcore.Core instead. See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. +func Fields(fs ...Field) Option { + return optionFunc(func(log *Logger) { + log.core = log.core.With(fs) + }) +} + +// ErrorOutput sets the destination for errors generated by the Logger. Note +// that this option only affects internal errors; for sample code that sends +// error-level logs to a different location from info- and debug-level logs, +// see the package-level AdvancedConfiguration example. +// +// The supplied WriteSyncer must be safe for concurrent use. The Open and +// zapcore.Lock functions are the simplest ways to protect files with a mutex. +func ErrorOutput(w zapcore.WriteSyncer) Option { + return optionFunc(func(log *Logger) { + log.errorOutput = w + }) +} + +// Development puts the logger in development mode, which makes DPanic-level +// logs panic instead of simply logging an error. +func Development() Option { + return optionFunc(func(log *Logger) { + log.development = true + }) +} + +// AddCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller. See also WithCaller. +func AddCaller() Option { + return WithCaller(true) +} + +// WithCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller, or not, depending on the +// value of enabled. 
This is a generalized form of AddCaller. +func WithCaller(enabled bool) Option { + return optionFunc(func(log *Logger) { + log.addCaller = enabled + }) +} + +// AddCallerSkip increases the number of callers skipped by caller annotation +// (as enabled by the AddCaller option). When building wrappers around the +// Logger and SugaredLogger, supplying this Option prevents zap from always +// reporting the wrapper code as the caller. +func AddCallerSkip(skip int) Option { + return optionFunc(func(log *Logger) { + log.callerSkip += skip + }) +} + +// AddStacktrace configures the Logger to record a stack trace for all messages at +// or above a given level. +func AddStacktrace(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + log.addStack = lvl + }) +} + +// IncreaseLevel increase the level of the logger. It has no effect if +// the passed in level tries to decrease the level of the logger. +func IncreaseLevel(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) + if err != nil { + fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + } else { + log.core = core + } + }) +} + +// OnFatal sets the action to take on fatal logs. +func OnFatal(action zapcore.CheckWriteAction) Option { + return optionFunc(func(log *Logger) { + log.onFatal = action + }) +} + +// WithClock specifies the clock used by the logger to determine the current +// time for logged entries. Defaults to the system clock with time.Now. +func WithClock(clock zapcore.Clock) Option { + return optionFunc(func(log *Logger) { + log.clock = clock + }) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/sink.go b/scripts/token-log-collector/vendor/go.uber.org/zap/sink.go new file mode 100644 index 000000000..df46fa87a --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/sink.go @@ -0,0 +1,161 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
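The Options defined above compose through WithOptions. A short, hedged example of layering a few of them onto an existing logger (the component field name is an assumption, not part of this change):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base, _ := zap.NewProduction()
	defer base.Sync()

	logger := base.WithOptions(
		zap.AddCaller(),                       // annotate entries with file:line
		zap.AddStacktrace(zapcore.ErrorLevel), // attach stacks to error-and-above entries
		zap.Fields(zap.String("component", "token-log-collector")),
	)

	logger.Error("refresh token rejected") // carries caller, stacktrace, and the component field
}
```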
+ +package zap + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "strings" + "sync" + + "go.uber.org/zap/zapcore" +) + +const schemeFile = "file" + +var ( + _sinkMutex sync.RWMutex + _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme +) + +func init() { + resetSinkRegistry() +} + +func resetSinkRegistry() { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + _sinkFactories = map[string]func(*url.URL) (Sink, error){ + schemeFile: newFileSink, + } +} + +// Sink defines the interface to write to and close logger destinations. +type Sink interface { + zapcore.WriteSyncer + io.Closer +} + +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type errSinkNotFound struct { + scheme string +} + +func (e *errSinkNotFound) Error() string { + return fmt.Sprintf("no sink found for scheme %q", e.scheme) +} + +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 3.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. +func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + if scheme == "" { + return errors.New("can't register a sink factory for empty string") + } + normalized, err := normalizeScheme(scheme) + if err != nil { + return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) + } + if _, ok := _sinkFactories[normalized]; ok { + return fmt.Errorf("sink factory already registered for scheme %q", normalized) + } + _sinkFactories[normalized] = factory + return nil +} + +func newSink(rawURL string) (Sink, error) { + u, err := url.Parse(rawURL) + if err != nil { + return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) + } + if u.Scheme == "" { + u.Scheme = schemeFile + } + + _sinkMutex.RLock() + factory, ok := _sinkFactories[u.Scheme] + _sinkMutex.RUnlock() + if !ok { + return nil, &errSinkNotFound{u.Scheme} + } + return factory(u) +} + +func newFileSink(u *url.URL) (Sink, error) { + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) + } + if u.RawQuery != "" { + return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. + if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) + } + switch u.Path { + case "stdout": + return nopCloserSink{os.Stdout}, nil + case "stderr": + return nopCloserSink{os.Stderr}, nil + } + return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) +} + +func normalizeScheme(s string) (string, error) { + // https://tools.ietf.org/html/rfc3986#section-3.1 + s = strings.ToLower(s) + if first := s[0]; 'a' > first || 'z' < first { + return "", errors.New("must start with a letter") + } + for i := 1; i < len(s); i++ { // iterate over bytes, not runes + c := s[i] + switch { + case 'a' <= c && c <= 'z': + continue + case '0' <= c && c <= '9': + continue + case c == '.' 
|| c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/stacktrace.go b/scripts/token-log-collector/vendor/go.uber.org/zap/stacktrace.go new file mode 100644 index 000000000..3d187fa56 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/stacktrace.go @@ -0,0 +1,176 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "runtime" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _stacktracePool = sync.Pool{ + New: func() interface{} { + return &stacktrace{ + storage: make([]uintptr, 64), + } + }, +} + +type stacktrace struct { + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames + + // The size of pcs varies depending on requirements: + // it will be one if the only the first frame was requested, + // and otherwise it will reflect the depth of the call stack. + // + // storage decouples the slice we need (pcs) from the slice we pool. + // We will always allocate a reasonably large storage, but we'll use + // only as much of it as we need. + storage []uintptr +} + +// stacktraceDepth specifies how deep of a stack trace should be captured. +type stacktraceDepth int + +const ( + // stacktraceFirst captures only the first frame. + stacktraceFirst stacktraceDepth = iota + + // stacktraceFull captures the entire call stack, allocating more + // storage for it if needed. + stacktraceFull +) + +// captureStacktrace captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of +// captureStacktrace. +// +// The caller must call Free on the returned stacktrace after using it. +func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { + stack := _stacktracePool.Get().(*stacktrace) + + switch depth { + case stacktraceFirst: + stack.pcs = stack.storage[:1] + case stacktraceFull: + stack.pcs = stack.storage + } + + // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers + // itself. +2 to skip captureStacktrace and runtime.Callers. + numFrames := runtime.Callers( + skip+2, + stack.pcs, + ) + + // runtime.Callers truncates the recorded stacktrace if there is no + // room in the provided slice. For the full stack trace, keep expanding + // storage until there are fewer frames than there is room. 
+ if depth == stacktraceFull { + pcs := stack.pcs + for numFrames == len(pcs) { + pcs = make([]uintptr, len(pcs)*2) + numFrames = runtime.Callers(skip+2, pcs) + } + + // Discard old storage instead of returning it to the pool. + // This will adjust the pool size over time if stack traces are + // consistently very deep. + stack.storage = pcs + stack.pcs = pcs[:numFrames] + } else { + stack.pcs = stack.pcs[:numFrames] + } + + stack.frames = runtime.CallersFrames(stack.pcs) + return stack +} + +// Free releases resources associated with this stacktrace +// and returns it back to the pool. +func (st *stacktrace) Free() { + st.frames = nil + st.pcs = nil + _stacktracePool.Put(st) +} + +// Count reports the total number of frames in this stacktrace. +// Count DOES NOT change as Next is called. +func (st *stacktrace) Count() int { + return len(st.pcs) +} + +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. +func (st *stacktrace) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() +} + +func takeStacktrace(skip int) string { + stack := captureStacktrace(skip+1, stacktraceFull) + defer stack.Free() + + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := newStackFormatter(buffer) + stackfmt.FormatStack(stack) + return buffer.String() +} + +// stackFormatter formats a stack trace into a readable string representation. +type stackFormatter struct { + b *buffer.Buffer + nonEmpty bool // whehther we've written at least one frame already +} + +// newStackFormatter builds a new stackFormatter. +func newStackFormatter(b *buffer.Buffer) stackFormatter { + return stackFormatter{b: b} +} + +// FormatStack formats all remaining frames in the provided stacktrace -- minus +// the final runtime.main/runtime.goexit frame. +func (sf *stackFormatter) FormatStack(stack *stacktrace) { + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := stack.Next(); more; frame, more = stack.Next() { + sf.FormatFrame(frame) + } +} + +// FormatFrame formats the given frame. +func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { + if sf.nonEmpty { + sf.b.AppendByte('\n') + } + sf.nonEmpty = true + sf.b.AppendString(frame.Function) + sf.b.AppendByte('\n') + sf.b.AppendByte('\t') + sf.b.AppendString(frame.File) + sf.b.AppendByte(':') + sf.b.AppendInt(int64(frame.Line)) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/sugar.go b/scripts/token-log-collector/vendor/go.uber.org/zap/sugar.go new file mode 100644 index 000000000..0b9651981 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/sugar.go @@ -0,0 +1,315 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes three methods: one for loosely-typed +// structured logging, one for println-style formatting, and one for +// printf-style formatting. For example, SugaredLoggers can produce InfoLevel +// output with Infow ("info with" structured context), Info, or Infof. +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// is the equivalent of +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. +func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Debug uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error uses fmt.Sprint to construct and log a message. 
+func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. 
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessage(template, fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// getMessage format with Sprint, Sprintf, or neither. +func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields := make([]Field, 0, len(args)) + var invalid invalidPairs + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. + key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/time.go b/scripts/token-log-collector/vendor/go.uber.org/zap/time.go new file mode 100644 index 000000000..c5a1f1622 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
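For reference, a minimal sketch of how the SugaredLogger API vendored above is typically used; it assumes only the public zap API, and the key names and values are illustrative:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	sugar := logger.Sugar()

	// Loosely typed key-value pairs (Infow), printf-style (Infof),
	// and println-style (Info) variants, as described above.
	sugar.Infow("fetched url",
		"url", "https://example.com",
		"attempt", 3,
	)
	sugar.Infof("fetched %s in %d ms", "https://example.com", 42)

	// With returns a child logger carrying the extra context.
	reqLogger := sugar.With("requestID", "abc-123")
	reqLogger.Info("handling request")
}
```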
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/writer.go b/scripts/token-log-collector/vendor/go.uber.org/zap/writer.go new file mode 100644 index 000000000..86a709ab0 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,99 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + "io/ioutil" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". 
+// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, close, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) + return writer, close, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + close := func() { + for _, c := range closers { + c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + close() + return writers, nil, openErr + } + + return writers, close, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(ioutil.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 000000000..ef2f7d963 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,188 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. 
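A short sketch of the `Open`/`CombineWriteSyncers` behavior described above: multiple paths are combined into one locked WriteSyncer with a single close function. The file path here is only a placeholder:

```go
package main

import (
	"log"

	"go.uber.org/zap"
)

func main() {
	// "stdout" is special-cased; the second path is an ordinary file sink.
	ws, closeAll, err := zap.Open("stdout", "/tmp/token-log-collector.log")
	if err != nil {
		log.Fatalf("opening sinks: %v", err)
	}
	defer closeAll()

	// ws fans every write out to both sinks.
	if _, err := ws.Write([]byte("sinks opened\n")); err != nil {
		log.Printf("write failed: %v", err)
	}
	_ = ws.Sync()
}
```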
+ _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +type BufferedWriteSyncer struct { + // WS is the WriteSyncer around which BufferedWriteSyncer will buffer + // writes. + // + // This field is required. + WS WriteSyncer + + // Size specifies the maximum amount of data the writer will buffered + // before flushing. + // + // Defaults to 256 kB if unspecified. + Size int + + // FlushInterval specifies how often the writer should flush data if + // there have been no writes. + // + // Defaults to 30 seconds if unspecified. + FlushInterval time.Duration + + // Clock, if specified, provides control of the source of time for the + // writer. + // + // Defaults to the system clock. + Clock Clock + + // unexported fields for state + mu sync.Mutex + initialized bool // whether initialize() has run + stopped bool // whether Stop() has run + writer *bufio.Writer + ticker *time.Ticker + stop chan struct{} // closed when flushLoop should stop + done chan struct{} // closed when flushLoop has stopped +} + +func (s *BufferedWriteSyncer) initialize() { + size := s.Size + if size == 0 { + size = _defaultBufferSize + } + + flushInterval := s.FlushInterval + if flushInterval == 0 { + flushInterval = _defaultFlushInterval + } + + if s.Clock == nil { + s.Clock = DefaultClock + } + + s.ticker = s.Clock.NewTicker(flushInterval) + s.writer = bufio.NewWriterSize(s.WS, size) + s.stop = make(chan struct{}) + s.done = make(chan struct{}) + s.initialized = true + go s.flushLoop() +} + +// Write writes log data into buffer syncer directly, multiple Write calls will be batched, +// and log data will be flushed to disk when the buffer is full or periodically. +func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + s.initialize() + } + + // To avoid partial writes from being flushed, we manually flush the existing buffer if: + // * The current write doesn't fit into the buffer fully, and + // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) + if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { + if err := s.writer.Flush(); err != nil { + return 0, err + } + } + + return s.writer.Write(bs) +} + +// Sync flushes buffered log data into disk directly. +func (s *BufferedWriteSyncer) Sync() error { + s.mu.Lock() + defer s.mu.Unlock() + + var err error + if s.initialized { + err = s.writer.Flush() + } + + return multierr.Append(err, s.WS.Sync()) +} + +// flushLoop flushes the buffer at the configured interval until Stop is +// called. +func (s *BufferedWriteSyncer) flushLoop() { + defer close(s.done) + + for { + select { + case <-s.ticker.C: + // we just simply ignore error here + // because the underlying bufio writer stores any errors + // and we return any error from Sync() as part of the close + _ = s.Sync() + case <-s.stop: + return + } + } +} + +// Stop closes the buffer, cleans up background goroutines, and flushes +// remaining unwritten data. +func (s *BufferedWriteSyncer) Stop() (err error) { + var stopped bool + + // Critical section. 
+ func() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + return + } + + stopped = s.stopped + if stopped { + return + } + s.stopped = true + + s.ticker.Stop() + close(s.stop) // tell flushLoop to stop + <-s.done // and wait until it has + }() + + // Don't call Sync on consecutive Stops. + if !stopped { + err = s.Sync() + } + + return err +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/clock.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/clock.go new file mode 100644 index 000000000..422fd82a6 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/clock.go @@ -0,0 +1,48 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. +type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/console_encoder.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 000000000..1aa5dc364 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,157 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
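The buffering behavior described above is configured by embedding a `BufferedWriteSyncer` between the encoder core and the underlying sink. A hedged sketch, with an arbitrary buffer size and flush interval (zero values would fall back to the defaults noted above):

```go
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	f, err := os.CreateTemp("", "buffered-*.log")
	if err != nil {
		panic(err)
	}

	// Buffer writes in memory; flush at 128 kB or every 10 seconds,
	// whichever comes first.
	ws := &zapcore.BufferedWriteSyncer{
		WS:            zapcore.AddSync(f),
		Size:          128 * 1024,
		FlushInterval: 10 * time.Second,
	}
	// Stop flushes any remaining data and stops the background goroutine.
	defer ws.Stop()

	core := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), ws, zapcore.InfoLevel)
	logger := zap.New(core)
	logger.Info("buffered write")
}
```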
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _sliceEncoderPool = sync.Pool{ + New: func() interface{} { + return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} + }, +} + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get().(*sliceArrayEncoder) +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if cfg.ConsoleSeparator == "" { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } + } + for i := range arr.elems { + if i > 0 { + line.AppendString(c.ConsoleSeparator) + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addSeparatorIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. 
+ if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + line.AppendString(c.LineEnding) + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. + context.buf.Free() + putJSONEncoder(context) + }() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addSeparatorIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendString(c.ConsoleSeparator) + } +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/core.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 000000000..a1ef8b034 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. + Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. 
+func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. +func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue #370. + c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/doc.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 000000000..31000e91f --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/encoder.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 000000000..6e5fd5651 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,448 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
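A sketch of assembling the pieces defined above, an encoder, a WriteSyncer, and a level, into a Core via `NewCore`, plus the no-op Core for the fully disabled case. Assumes only the public zap/zapcore API:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Console encoder: plain-text entry metadata, structured context as JSON,
	// tab-separated by default.
	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.DebugLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Debug("console core ready", zap.String("component", "example"))

	// A no-op Core can stand in where logging must be disabled entirely.
	silent := zap.New(zapcore.NewNopCore())
	silent.Info("this entry is dropped")
}
```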
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" + "time" + + "go.uber.org/zap/buffer" +) + +// DefaultLineEnding defines the default line ending when writing logs. +// Alternate line endings specified in EncoderConfig can override this +// behavior. +const DefaultLineEnding = "\n" + +// OmitKey defines the key to use when callers want to remove a key from log output. +const OmitKey = "" + +// A LevelEncoder serializes a Level to a primitive type. +type LevelEncoder func(Level, PrimitiveArrayEncoder) + +// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, +// InfoLevel is serialized to "info". +func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.String()) +} + +// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. +// For example, InfoLevel is serialized to "info" and colored blue. +func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToLowercaseColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.String()) + } + enc.AppendString(s) +} + +// CapitalLevelEncoder serializes a Level to an all-caps string. For example, +// InfoLevel is serialized to "INFO". +func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.CapitalString()) +} + +// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. +// For example, InfoLevel is serialized to "INFO" and colored blue. +func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToCapitalColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.CapitalString()) + } + enc.AppendString(s) +} + +// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to +// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, +// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else +// is unmarshaled to LowercaseLevelEncoder. +func (e *LevelEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "capital": + *e = CapitalLevelEncoder + case "capitalColor": + *e = CapitalColorLevelEncoder + case "color": + *e = LowercaseColorLevelEncoder + default: + *e = LowercaseLevelEncoder + } + return nil +} + +// A TimeEncoder serializes a time.Time to a primitive type. 
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder) + +// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds +// since the Unix epoch. +func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + sec := float64(nanos) / float64(time.Second) + enc.AppendFloat64(sec) +} + +// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of +// milliseconds since the Unix epoch. +func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + millis := float64(nanos) / float64(time.Millisecond) + enc.AppendFloat64(millis) +} + +// EpochNanosTimeEncoder serializes a time.Time to an integer number of +// nanoseconds since the Unix epoch. +func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendInt64(t.UnixNano()) +} + +func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) { + type appendTimeEncoder interface { + AppendTimeLayout(time.Time, string) + } + + if enc, ok := enc.(appendTimeEncoder); ok { + enc.AppendTimeLayout(t, layout) + return + } + + enc.AppendString(t.Format(layout)) +} + +// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string +// with millisecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc) +} + +// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339, enc) +} + +// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string +// with nanosecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339Nano, enc) +} + +// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using +// given layout. +func TimeEncoderOfLayout(layout string) TimeEncoder { + return func(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, layout, enc) + } +} + +// UnmarshalText unmarshals text to a TimeEncoder. +// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder. +// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder. +// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder. +// "millis" is unmarshaled to EpochMillisTimeEncoder. +// "nanos" is unmarshaled to EpochNanosEncoder. +// Anything else is unmarshaled to EpochTimeEncoder. +func (e *TimeEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "rfc3339nano", "RFC3339Nano": + *e = RFC3339NanoTimeEncoder + case "rfc3339", "RFC3339": + *e = RFC3339TimeEncoder + case "iso8601", "ISO8601": + *e = ISO8601TimeEncoder + case "millis": + *e = EpochMillisTimeEncoder + case "nanos": + *e = EpochNanosTimeEncoder + default: + *e = EpochTimeEncoder + } + return nil +} + +// UnmarshalYAML unmarshals YAML to a TimeEncoder. +// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. +// timeEncoder: +// layout: 06/01/02 03:04pm +// If value is string, it uses UnmarshalText. 
+// timeEncoder: iso8601 +func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { + var o struct { + Layout string `json:"layout" yaml:"layout"` + } + if err := unmarshal(&o); err == nil { + *e = TimeEncoderOfLayout(o.Layout) + return nil + } + + var s string + if err := unmarshal(&s); err != nil { + return err + } + return e.UnmarshalText([]byte(s)) +} + +// UnmarshalJSON unmarshals JSON to a TimeEncoder as same way UnmarshalYAML does. +func (e *TimeEncoder) UnmarshalJSON(data []byte) error { + return e.UnmarshalYAML(func(v interface{}) error { + return json.Unmarshal(data, v) + }) +} + +// A DurationEncoder serializes a time.Duration to a primitive type. +type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) + +// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. +func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendFloat64(float64(d) / float64(time.Second)) +} + +// NanosDurationEncoder serializes a time.Duration to an integer number of +// nanoseconds elapsed. +func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(int64(d)) +} + +// MillisDurationEncoder serializes a time.Duration to an integer number of +// milliseconds elapsed. +func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(d.Nanoseconds() / 1e6) +} + +// StringDurationEncoder serializes a time.Duration using its built-in String +// method. +func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendString(d.String()) +} + +// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled +// to StringDurationEncoder, and anything else is unmarshaled to +// NanosDurationEncoder. +func (e *DurationEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "string": + *e = StringDurationEncoder + case "nanos": + *e = NanosDurationEncoder + case "ms": + *e = MillisDurationEncoder + default: + *e = SecondsDurationEncoder + } + return nil +} + +// A CallerEncoder serializes an EntryCaller to a primitive type. +type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) + +// FullCallerEncoder serializes a caller in /full/path/to/package/file:line +// format. +func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.String()) +} + +// ShortCallerEncoder serializes a caller in package/file:line format, trimming +// all but the final directory from the full path. +func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.TrimmedPath()) +} + +// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to +// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. +func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// A NameEncoder serializes a period-separated logger name to a primitive +// type. +type NameEncoder func(string, PrimitiveArrayEncoder) + +// FullNameEncoder serializes the logger name as-is. +func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. 
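The level, time, duration, and caller encoders above are normally selected through the `EncoderConfig` defined just below. A hedged sketch, with illustrative key names:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zapcore.EncoderConfig{
		TimeKey:        "ts",
		LevelKey:       "level",
		MessageKey:     "msg",
		CallerKey:      "caller",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeTime:     zapcore.ISO8601TimeEncoder,    // or TimeEncoderOfLayout("06/01/02 03:04pm")
		EncodeLevel:    zapcore.CapitalLevelEncoder,   // "INFO", "ERROR", ...
		EncodeDuration: zapcore.StringDurationEncoder, // "1.5s" instead of nanoseconds
		EncodeCaller:   zapcore.ShortCallerEncoder,    // package/file:line
	}

	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)
	logger := zap.New(core, zap.AddCaller())
	defer logger.Sync()

	logger.Info("encoder configuration applied")
}
```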
+func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configure the encoder for interface{} type objects. + // If not provided, objects are encoded using json.Encoder + NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"` + // Configures the field separator used by the console encoder. Defaults + // to tab. + ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. + AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. 
Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. Any fields that are empty, + // including fields on the `Entry` type, should be omitted. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/entry.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 000000000..0885505b7 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,262 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
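The ObjectEncoder interface above is what user types plug into by implementing `zapcore.ObjectMarshaler`. A small sketch; the `user` type and its fields are made up for illustration:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// user is a hypothetical type; implementing MarshalLogObject lets it be
// encoded through any ObjectEncoder without reflection.
type user struct {
	Name  string
	Admin bool
}

func (u user) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.Name)
	enc.AddBool("admin", u.Admin)
	return nil
}

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	// zap.Object accepts any zapcore.ObjectMarshaler.
	logger.Info("user logged in", zap.Object("user", user{Name: "alice", Admin: false}))
}
```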
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "runtime" + "strings" + "sync" + "time" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + + "go.uber.org/multierr" +) + +var ( + _cePool = sync.Pool{New: func() interface{} { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } + }} +) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get().(*CheckedEntry) + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int + Function string +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. 
+type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. + WriteThenGoexit + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes a fatal os.Exit after Write. + WriteThenFatal +) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or Should on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. +type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + should CheckWriteAction + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.should = WriteThenNoop + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + ce.ErrorOutput.Sync() + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + ce.ErrorOutput.Sync() + } + + should, msg := ce.should, ce.Message + putCheckedEntry(ce) + + switch should { + case WriteThenPanic: + panic(msg) + case WriteThenFatal: + exit.Exit() + case WriteThenGoexit: + runtime.Goexit() + } +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. 
+func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.should = should + return ce +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/error.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 000000000..74919b0cc --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,132 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "reflect" + "sync" +) + +// Encodes the given error into fields of an object. A field with the given +// name is added for the error message. +// +// If the error implements fmt.Formatter, a field with the name ${key}Verbose +// is also added with the full verbose error message. +// +// Finally, if the error implements errorGroup (from go.uber.org/multierr) or +// causer (from github.com/pkg/errors), a ${key}Causes field is added with an +// array of objects containing the errors this error was comprised of. +// +// { +// "error": err.Error(), +// "errorVerbose": fmt.Sprintf("%+v", err), +// "errorCauses": [ +// ... +// ], +// } +func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the Error() method + defer func() { + if rerr := recover(); rerr != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // error that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", rerr) + } + }() + + basic := err.Error() + enc.AddString(key, basic) + + switch e := err.(type) { + case errorGroup: + return enc.AddArray(key+"Causes", errArray(e.Errors())) + case fmt.Formatter: + verbose := fmt.Sprintf("%+v", e) + if verbose != basic { + // This is a rich error type, like those produced by + // github.com/pkg/errors. + enc.AddString(key+"Verbose", verbose) + } + } + return nil +} + +type errorGroup interface { + // Provides read-only access to the underlying list of errors, preferably + // without causing any allocs. 
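A sketch of how the error-encoding logic above surfaces when logging an error field. go.uber.org/multierr satisfies the errorGroup interface, so the encoder emits an "errorCauses" array alongside the "error" key; the error strings are illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	// multierr exposes Errors() []error, so encodeError adds "errorCauses".
	err := multierr.Combine(
		errors.New("refresh token expired"),
		fmt.Errorf("connect to VCD: %w", errors.New("connection refused")),
	)
	logger.Error("token verification failed", zap.Error(err))
}
```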
+ Errors() []error +} + +// Note that errArray and errArrayElem are very similar to the version +// implemented in the top-level error.go file. We can't re-use this because +// that would require exporting errArray as part of the zapcore API. + +// Encodes a list of errors using the standard error encoding logic. +type errArray []error + +func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + + el := newErrArrayElem(errs[i]) + arr.AppendObject(el) + el.Free() + } + return nil +} + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Encodes any error into a {"error": ...} re-using the same errors logic. +// +// May be passed in place of an array to build a single-element array. +type errArrayElem struct{ err error } + +func newErrArrayElem(err error) *errArrayElem { + e := _errArrayElemPool.Get().(*errArrayElem) + e.err = err + return e +} + +func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { + return arr.AppendObject(e) +} + +func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { + return encodeError("error", e.err, enc) +} + +func (e *errArrayElem) Free() { + e.err = nil + _errArrayElemPool.Put(e) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/field.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/field.go new file mode 100644 index 000000000..95bdb0a12 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/field.go @@ -0,0 +1,233 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "fmt" + "math" + "reflect" + "time" +) + +// A FieldType indicates which member of the Field union struct should be used +// and how it should be serialized. +type FieldType uint8 + +const ( + // UnknownType is the default field type. Attempting to add it to an encoder will panic. + UnknownType FieldType = iota + // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. + ArrayMarshalerType + // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. + ObjectMarshalerType + // BinaryType indicates that the field carries an opaque binary blob. + BinaryType + // BoolType indicates that the field carries a bool. + BoolType + // ByteStringType indicates that the field carries UTF-8 encoded bytes. 
+ ByteStringType + // Complex128Type indicates that the field carries a complex128. + Complex128Type + // Complex64Type indicates that the field carries a complex128. + Complex64Type + // DurationType indicates that the field carries a time.Duration. + DurationType + // Float64Type indicates that the field carries a float64. + Float64Type + // Float32Type indicates that the field carries a float32. + Float32Type + // Int64Type indicates that the field carries an int64. + Int64Type + // Int32Type indicates that the field carries an int32. + Int32Type + // Int16Type indicates that the field carries an int16. + Int16Type + // Int8Type indicates that the field carries an int8. + Int8Type + // StringType indicates that the field carries a string. + StringType + // TimeType indicates that the field carries a time.Time that is + // representable by a UnixNano() stored as an int64. + TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. + InlineMarshalerType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. 
+func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. + enc.AddTime(f.Key, time.Unix(0, f.Integer)) + } + case TimeFullType: + enc.AddTime(f.Key, f.Interface.(time.Time)) + case Uint64Type: + enc.AddUint64(f.Key, uint64(f.Integer)) + case Uint32Type: + enc.AddUint32(f.Key, uint32(f.Integer)) + case Uint16Type: + enc.AddUint16(f.Key, uint16(f.Integer)) + case Uint8Type: + enc.AddUint8(f.Key, uint8(f.Integer)) + case UintptrType: + enc.AddUintptr(f.Key, uintptr(f.Integer)) + case ReflectType: + err = enc.AddReflected(f.Key, f.Interface) + case NamespaceType: + enc.OpenNamespace(f.Key) + case StringerType: + err = encodeStringer(f.Key, f.Interface, enc) + case ErrorType: + err = encodeError(f.Key, f.Interface.(error), enc) + case SkipType: + break + default: + panic(fmt.Sprintf("unknown field type: %v", f)) + } + + if err != nil { + enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) + } +} + +// Equals returns whether two fields are equal. For non-primitive types such as +// errors, marshalers, or reflect types, it uses reflect.DeepEqual. +func (f Field) Equals(other Field) bool { + if f.Type != other.Type { + return false + } + if f.Key != other.Key { + return false + } + + switch f.Type { + case BinaryType, ByteStringType: + return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) + case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: + return reflect.DeepEqual(f.Interface, other.Interface) + default: + return f == other + } +} + +func addFields(enc ObjectEncoder, fields []Field) { + for i := range fields { + fields[i].AddTo(enc) + } +} + +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the String() method, similar to https://golang.org/src/fmt/print.go#L540 + defer func() { + if err := recover(); err != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // Stringer that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. 
+ if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", err) + } + }() + + enc.AddString(key, stringer.(fmt.Stringer).String()) + return nil +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/hook.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/hook.go new file mode 100644 index 000000000..5db4afb30 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/hook.go @@ -0,0 +1,68 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type hooked struct { + Core + funcs []func(Entry) error +} + +// RegisterHooks wraps a Core and runs a collection of user-defined callback +// hooks each time a message is logged. Execution of the callbacks is blocking. +// +// This offers users an easy way to register simple callbacks (e.g., metrics +// collection) without implementing the full Core interface. +func RegisterHooks(core Core, hooks ...func(Entry) error) Core { + funcs := append([]func(Entry) error{}, hooks...) + return &hooked{ + Core: core, + funcs: funcs, + } +} + +func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + // Let the wrapped Core decide whether to log this message or not. This + // also gives the downstream a chance to register itself directly with the + // CheckedEntry. + if downstream := h.Core.Check(ent, ce); downstream != nil { + return downstream.AddCore(ent, h) + } + return ce +} + +func (h *hooked) With(fields []Field) Core { + return &hooked{ + Core: h.Core.With(fields), + funcs: h.funcs, + } +} + +func (h *hooked) Write(ent Entry, _ []Field) error { + // Since our downstream had a chance to register itself directly with the + // CheckedMessage, we don't need to call it here. + var err error + for i := range h.funcs { + err = multierr.Append(err, h.funcs[i](ent)) + } + return err +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/increase_level.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/increase_level.go new file mode 100644 index 000000000..5a1749261 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -0,0 +1,66 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "fmt" + +type levelFilterCore struct { + core Core + level LevelEnabler +} + +// NewIncreaseLevelCore creates a core that can be used to increase the level of +// an existing Core. It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. +func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/json_encoder.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 000000000..c5d751b82 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,560 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "math" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = sync.Pool{New: func() interface{} { + return &jsonEncoder{} +}} + +func getJSONEncoder() *jsonEncoder { + return _jsonPool.Get().(*jsonEncoder) +} + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc ReflectedEncoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. +// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// {"foo":"bar","foo":"baz"} +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. +func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + if cfg.SkipLineEnding { + cfg.LineEnding = "" + } else if cfg.LineEnding == "" { + cfg.LineEnding = DefaultLineEnding + } + + // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default + if cfg.NewReflectedEncoder == nil { + cfg.NewReflectedEncoder = defaultReflectedEncoder + } + + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddComplex64(key string, val complex64) { + enc.addKey(key) + enc.AppendComplex64(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + 
enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. +func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } + enc.resetReflectBuf() + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) + if err != nil { + return err + } + enc.addKey(key) + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + // Close ONLY new openNamespaces that are created during + // AppendObject(). + old := enc.openNamespaces + enc.openNamespaces = 0 + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + enc.closeOpenNamespaces() + enc.openNamespaces = old + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +// appendComplex appends the encoded form of the provided complex128 value. +// precision specifies the encoding precision for the real and imaginary +// components of the complex number. +func (enc *jsonEncoder) appendComplex(val complex128, precision int) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, precision) + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. + if i >= 0 { + enc.buf.AppendByte('+') + } + enc.buf.AppendFloat(i, precision) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. 
+ enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + valueBytes, err := enc.encodeReflected(val) + if err != nil { + return err + } + enc.addElementSeparator() + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + if e := enc.EncodeTime; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) } +func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := getJSONEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" && final.EncodeLevel != nil { + final.addKey(final.LevelKey) + 
cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + final.buf.AppendString(final.LineEnding) + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } + enc.openNamespaces = 0 +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *jsonEncoder) safeAddString(s string) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.AppendString(s[i : i+size]) + i += size + } +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. 
+func (enc *jsonEncoder) safeAddByteString(s []byte) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRune(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.Write(s[i : i+size]) + i += size + } +} + +// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. +func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { + if b >= utf8.RuneSelf { + return false + } + if 0x20 <= b && b != '\\' && b != '"' { + enc.buf.AppendByte(b) + return true + } + switch b { + case '\\', '"': + enc.buf.AppendByte('\\') + enc.buf.AppendByte(b) + case '\n': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('n') + case '\r': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('r') + case '\t': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + enc.buf.AppendString(`\u00`) + enc.buf.AppendByte(_hex[b>>4]) + enc.buf.AppendByte(_hex[b&0xF]) + } + return true +} + +func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { + if r == utf8.RuneError && size == 1 { + enc.buf.AppendString(`\ufffd`) + return true + } + return false +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/level.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/level.go new file mode 100644 index 000000000..56e88dc0c --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/level.go @@ -0,0 +1,187 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "errors" + "fmt" +) + +var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") + +// A Level is a logging priority. Higher levels are more important. +type Level int8 + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel Level = iota - 1 + // InfoLevel is the default logging priority. + InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. 
+ PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel +) + +// ParseLevel parses a level based on the lower-case or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseLevel(text string) (Level, error) { + var level Level + err := level.UnmarshalText([]byte(text)) + return level, err +} + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. +func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. 
For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/level_strings.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 000000000..7af8dadcb --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/marshaler.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 000000000..c3c55ba0d --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,61 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 000000000..dfead0829 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. +func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. +type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/reflected_encoder.go new file mode 100644 index 000000000..8746360ec --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/reflected_encoder.go @@ -0,0 +1,41 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" +) + +// ReflectedEncoder serializes log fields that can't be serialized with Zap's +// JSON encoder. These have the ReflectType field type. +// Use EncoderConfig.NewReflectedEncoder to set this. +type ReflectedEncoder interface { + // Encode encodes and writes to the underlying data stream. + Encode(interface{}) error +} + +func defaultReflectedEncoder(w io.Writer) ReflectedEncoder { + enc := json.NewEncoder(w) + // For consistency with our custom JSON encoder. + enc.SetEscapeHTML(false) + return enc +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/sampler.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 000000000..8c116049d --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,221 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import ( + "time" + + "go.uber.org/atomic" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Inc() + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CAS(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Inc() + } + + return 1 +} + +// SamplingDecision is a decision represented as a bit field made by sampler. +// More decisions may be added in the future. +type SamplingDecision uint32 + +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) +} + +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. +// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// For example, +// +// core = NewSamplerWithOptions(core, time.Second, 10, 5) +// +// This will log the first 10 log entries with the same level and message +// in a one second interval as-is. Following that, it will allow through +// every 5th log entry with the same level and message in that interval. +// +// If thereafter is zero, the Core will drop all log entries after the first N +// in that interval. +// +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. 
+// +// Keep in mind that Zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + hook: nopSamplingHook, + } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. +func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + hook: s.hook, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) + } + return s.Core.Check(ent, ce) +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/tee.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 000000000..07a32eef9 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. +func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/write_syncer.go b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 000000000..d4a1af3d0 --- /dev/null +++ b/scripts/token-log-collector/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. 
+func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + return multiWriteSyncer(ws) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/scripts/token-log-collector/vendor/modules.txt b/scripts/token-log-collector/vendor/modules.txt new file mode 100644 index 000000000..0903ad462 --- /dev/null +++ b/scripts/token-log-collector/vendor/modules.txt @@ -0,0 +1,38 @@ +# github.com/araddon/dateparse v0.0.0-20190622164848-0fb0a474d195 +## explicit +github.com/araddon/dateparse +# github.com/hashicorp/go-version v1.2.0 +## explicit +github.com/hashicorp/go-version +# github.com/kr/pretty v0.2.1 +## explicit; go 1.12 +github.com/kr/pretty +# github.com/kr/text v0.1.0 +## explicit +github.com/kr/text +# github.com/peterhellberg/link v1.1.0 +## explicit +github.com/peterhellberg/link +# github.com/vmware/go-vcloud-director/v2 v2.14.0-rc.3 +## explicit; go 1.16 +github.com/vmware/go-vcloud-director/v2/govcd +github.com/vmware/go-vcloud-director/v2/types/v56 +github.com/vmware/go-vcloud-director/v2/util +# go.uber.org/atomic v1.7.0 +## explicit; go 1.13 +go.uber.org/atomic +# go.uber.org/multierr v1.6.0 +## explicit; go 1.12 +go.uber.org/multierr +# go.uber.org/zap v1.21.0 +## explicit; go 1.13 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit +go.uber.org/zap/zapcore +# gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c +## explicit; go 1.11 +# gopkg.in/yaml.v2 v2.4.0 +## explicit; go 1.15